Commit 926cd70a authored by Manish Pandey, committed by TrustedFirmware Code Review

Merge changes from topic "brcm_initial_support" into integration

* changes:
  doc: brcm: Add documentation file for brcm stingray platform
  drivers: Add SPI Nor flash support
  drivers: Add iproc spi driver
  drivers: Add emmc driver for Broadcom platforms
  Add BL31 support for Broadcom stingray platform
  Add BL2 support for Broadcom stingray platform
  Add bl31 support common across Broadcom platforms
  Add bl2 setup code common across Broadcom platforms
  drivers: Add support to retrieve plat_toc_flags
parents 33f1dd9c fd1017b1
Description
===========
Broadcom's Stingray (BCM958742t) is a multi-core processor with 8 Cortex-A72 cores.
Trusted Firmware-A (TF-A) is used to implement the secure world firmware, supporting
BL2 and BL31 for Broadcom Stingray SoCs.

On power-on, the Boot ROM loads the BL2 image; BL2 initializes the hardware, then
loads BL31 and BL33 into DDR and boots to BL33.
Boot Sequence
=============
Bootrom --> TF-A BL2 --> TF-A BL31 --> BL33(u-boot)
Code Locations
--------------
- Trusted Firmware-A:
`link <https://github.com/ARM-software/arm-trusted-firmware>`__
How to build
============
Build Procedure
---------------
- Prepare the AArch64 toolchain.
- Build u-boot first, and get the binary image: u-boot.bin.
- Build TF-A
Build fip:

.. code-block:: shell

    make CROSS_COMPILE=aarch64-linux-gnu- PLAT=stingray BOARD_CFG=bcm958742t all fip BL33=u-boot.bin
Deploy TF-A Images
------------------

The u-boot changes will be upstreamed soon; this document will be updated with the
relevant link once they are available.
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <drivers/delay_timer.h>
#include <chimp.h>
#include <chimp_nv_defs.h>
#define CHIMP_DEFAULT_STARTUP_ADDR 0xb4300000
/* ChiMP's view of APE scratchpad memory for fastboot */
#define CHIMP_FASTBOOT_ADDR 0x61000000
#define CHIMP_PREPARE_ACCESS_WINDOW(addr) \
(\
mmio_write_32(\
NIC400_NITRO_CHIMP_S_IDM_IO_CONTROL_DIRECT, \
addr & 0xffc00000)\
)
#define CHIMP_INDIRECT_TGT_ADDR(addr) \
(CHIMP_INDIRECT_BASE + (addr & CHIMP_INDIRECT_ADDR_MASK))
#define CHIMP_CTRL_ADDR(x) (CHIMP_REG_CTRL_BASE + x)
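/*
 * Illustration (not part of the driver): how an indirect ChiMP access is
 * performed by the macros above and the accessors below. The exact values
 * of CHIMP_INDIRECT_BASE and CHIMP_INDIRECT_ADDR_MASK come from chimp.h;
 * the numbers here are only for the sake of the example.
 *
 *   addr = 0x61000000 (CHIMP_FASTBOOT_ADDR)
 *   1. CHIMP_PREPARE_ACCESS_WINDOW(addr) writes (addr & 0xffc00000),
 *      i.e. 0x61000000, to NIC400_NITRO_CHIMP_S_IDM_IO_CONTROL_DIRECT,
 *      selecting the 4MB window that contains the target address.
 *   2. The access itself then goes to
 *      CHIMP_INDIRECT_TGT_ADDR(addr) =
 *      CHIMP_INDIRECT_BASE + (addr & CHIMP_INDIRECT_ADDR_MASK),
 *      i.e. the offset of addr within that window.
 */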
/* For non-PAXC builds */
#ifndef CHIMP_FB1_ENTRY
#define CHIMP_FB1_ENTRY 0
#endif
#define CHIMP_DBG VERBOSE
void bcm_chimp_write(uintptr_t addr, uint32_t value)
{
CHIMP_PREPARE_ACCESS_WINDOW(addr);
mmio_write_32(CHIMP_INDIRECT_TGT_ADDR(addr), value);
}
uint32_t bcm_chimp_read(uintptr_t addr)
{
CHIMP_PREPARE_ACCESS_WINDOW(addr);
return mmio_read_32(CHIMP_INDIRECT_TGT_ADDR(addr));
}
void bcm_chimp_clrbits(uintptr_t addr, uint32_t bits)
{
CHIMP_PREPARE_ACCESS_WINDOW(addr);
mmio_clrbits_32(CHIMP_INDIRECT_TGT_ADDR(addr), bits);
}
void bcm_chimp_setbits(uintptr_t addr, uint32_t bits)
{
CHIMP_PREPARE_ACCESS_WINDOW(addr);
mmio_setbits_32(CHIMP_INDIRECT_TGT_ADDR(addr), bits);
}
int bcm_chimp_is_nic_mode(void)
{
uint32_t val;
/* Check if ChiMP straps are set */
val = mmio_read_32(CDRU_CHIP_STRAP_DATA_LSW);
val &= CDRU_CHIP_STRAP_DATA_LSW__NIC_MODE_MASK;
return val == CDRU_CHIP_STRAP_DATA_LSW__NIC_MODE_MASK;
}
void bcm_chimp_fru_prog_done(bool is_done)
{
uint32_t val;
val = is_done ? (1 << CHIMP_FRU_PROG_DONE_BIT) : 0;
bcm_chimp_setbits(CHIMP_REG_ECO_RESERVED, val);
}
int bcm_chimp_handshake_done(void)
{
uint32_t value;
value = bcm_chimp_read(CHIMP_REG_ECO_RESERVED);
value &= (1 << CHIMP_FLASH_ACCESS_DONE_BIT);
return value != 0;
}
int bcm_chimp_wait_handshake(void)
{
uint32_t timeout = CHIMP_HANDSHAKE_TIMEOUT_MS;
uint32_t status;
INFO("Waiting for ChiMP handshake...\n");
do {
if (bcm_chimp_handshake_done())
break;
/* No need to wait if ChiMP reported an error */
status = bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG);
if (status & CHIMP_ERROR_MASK) {
ERROR("ChiMP error 0x%x. Wait aborted\n", status);
break;
}
mdelay(1);
} while (--timeout);
if (!bcm_chimp_handshake_done()) {
if (timeout == 0) {
WARN("Timeout waiting for ChiMP handshake\n");
}
} else {
INFO("Got handshake from ChiMP!\n");
}
return bcm_chimp_handshake_done();
}
uint32_t bcm_chimp_read_ctrl(uint32_t offset)
{
return bcm_chimp_read(CHIMP_CTRL_ADDR(offset));
}
static int bcm_chimp_nitro_reset(void)
{
uint32_t timeout;
/* Perform tasks done by M0 in NIC mode */
CHIMP_DBG("Taking Nitro out of reset\n");
mmio_setbits_32(CDRU_MISC_RESET_CONTROL,
/* MHB_RESET_N */
(1 << CDRU_MISC_RESET_CONTROL__CDRU_MHB_RESET_N_R) |
/* PCI_RESET_N */
(1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R) |
/* PM_RESET_N */
(1 << CDRU_MISC_RESET_CONTROL__CDRU_PM_RESET_N_R) |
/* NIC_RESET_N */
(1 << CDRU_MISC_RESET_CONTROL__CDRU_NITRO_RESET_N_R)
);
/* Wait until Nitro is out of reset */
timeout = NIC_RESET_RELEASE_TIMEOUT_US;
do {
uint32_t value;
value = bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_MODE_REG);
if ((value & CHIMP_BPE_MODE_ID_MASK) ==
CHIMP_BPE_MODE_ID_PATTERN)
break;
udelay(1);
} while (--timeout);
if (timeout == 0) {
ERROR("NIC reset release timed out\n");
return -1;
}
return 0;
}
static void bcm_nitro_secure_mode_enable(void)
{
mmio_setbits_32(CDRU_NITRO_CONTROL,
(1 << CDRU_NITRO_CONTROL__CDRU_NITRO_SEC_MODE_R) |
(1 << CDRU_NITRO_CONTROL__CDRU_NITRO_SEC_OVERRIDE_R));
mmio_write_32(NITRO_TZPC_TZPCDECPROT0clr,
/* NITRO_TZPC */
1 << NITRO_TZPC_TZPCDECPROT0clr__DECPROT0_chimp_m_clr_R);
}
static int bcm_chimp_reset_and_initial_setup(void)
{
int err;
uint32_t handshake_reg;
err = bcm_chimp_nitro_reset();
if (err)
return err;
/* Enable Nitro secure mode */
bcm_nitro_secure_mode_enable();
/* Force ChiMP back into reset */
bcm_chimp_setbits(CHIMP_CTRL_ADDR(CHIMP_REG_CTRL_BPE_MODE_REG),
1 << CHIMP_REG_CHIMP_REG_CTRL_BPE_MODE_REG__cm3_rst_R);
handshake_reg = (1 << SR_IN_SMARTNIC_MODE_BIT);
/* Get OTP secure Chimp boot status */
if (mmio_read_32(CRMU_OTP_STATUS) & (1 << CRMU_OTP_STATUS_BIT))
handshake_reg |= (1 << SR_CHIMP_SECURE_BOOT_BIT);
bcm_chimp_write(CHIMP_REG_ECO_RESERVED, handshake_reg);
CHIMP_DBG("ChiMP reset and initial handshake parameters set\n");
return 0;
}
static void bcm_nitro_chimp_release_reset(void)
{
bcm_chimp_clrbits(CHIMP_CTRL_ADDR(CHIMP_REG_CTRL_BPE_MODE_REG),
1 << CHIMP_REG_CHIMP_REG_CTRL_BPE_MODE_REG__cm3_rst_R);
CHIMP_DBG("Nitro Reset Released\n");
}
static void bcm_chimp_set_fastboot(int mode)
{
uint32_t fb_entry;
/* 1. Enable fastboot */
bcm_chimp_setbits(CHIMP_CTRL_ADDR(CHIMP_REG_CTRL_BPE_MODE_REG),
(1 << CHIMP_FAST_BOOT_MODE_BIT));
fb_entry = CHIMP_FASTBOOT_ADDR | mode;
if (mode == CHIMP_FASTBOOT_JUMP_IN_PLACE)
fb_entry = CHIMP_FB1_ENTRY;
/* 2. Write startup address and mode */
INFO("Setting fastboot type %d entry to 0x%x\n", mode, fb_entry);
bcm_chimp_write(
CHIMP_CTRL_ADDR(CHIMP_REG_CTRL_FSTBOOT_PTR_REG),
fb_entry);
}
#ifndef CHIMPFW_USE_SIDELOAD
static void bcm_chimp_load_fw_from_spi(uintptr_t spi_addr, size_t size)
{
uintptr_t ape_scpad;
uintptr_t dest;
size_t bytes_left;
ape_scpad = CHIMP_REG_CHIMP_APE_SCPAD;
dest = CHIMP_INDIRECT_TGT_ADDR(CHIMP_REG_CHIMP_APE_SCPAD);
bytes_left = size;
while (bytes_left) {
uint32_t delta;
delta = bytes_left > CHIMP_WINDOW_SIZE ?
CHIMP_WINDOW_SIZE : bytes_left;
CHIMP_PREPARE_ACCESS_WINDOW(ape_scpad);
INFO("Transferring %d byte(s) from 0x%lx to 0x%lx\n",
delta, spi_addr, dest);
/*
* This single memcpy call takes significant amount of time
* on Palladium. Be patient
*/
memcpy((void *)dest, (void *)spi_addr, delta);
bytes_left -= delta;
INFO("Transferred %d byte(s) from 0x%lx to 0x%lx (%lu%%)\n",
delta, spi_addr, dest,
((size - bytes_left) * 100)/size);
spi_addr += delta;
dest += delta;
ape_scpad += delta;
}
}
static int bcm_chimp_find_fw_in_spi(uintptr_t *addr, size_t *size)
{
int i;
bnxnvm_master_block_header_t *master_block_hdr;
bnxnvm_directory_block_header_t *dir_block_hdr;
bnxnvm_directory_entry_t *dir_entry;
int found;
found = 0;
/* Read the master block */
master_block_hdr =
(bnxnvm_master_block_header_t *)(uintptr_t)QSPI_BASE_ADDR;
if (master_block_hdr->sig != BNXNVM_MASTER_BLOCK_SIG) {
WARN("Invalid masterblock 0x%x (expected 0x%x)\n",
master_block_hdr->sig,
BNXNVM_MASTER_BLOCK_SIG);
return -NV_NOT_NVRAM;
}
if ((master_block_hdr->block_size > NV_MAX_BLOCK_SIZE) ||
(master_block_hdr->directory_offset >=
master_block_hdr->nvram_size)) {
WARN("Invalid masterblock block size 0x%x or directory offset 0x%x\n",
master_block_hdr->block_size,
master_block_hdr->directory_offset);
return -NV_BAD_MB;
}
/* Skip to the Directory block start */
dir_block_hdr =
(bnxnvm_directory_block_header_t *)
((uintptr_t)QSPI_BASE_ADDR +
master_block_hdr->directory_offset);
if (dir_block_hdr->sig != BNXNVM_DIRECTORY_BLOCK_SIG) {
WARN("Invalid directory header 0x%x (expected 0x%x)\n",
dir_block_hdr->sig,
BNXNVM_DIRECTORY_BLOCK_SIG);
return -NV_BAD_DIR_HEADER;
}
/* Locate the firmware */
for (i = 0; i < dir_block_hdr->entries; i++) {
*addr = ((uintptr_t)dir_block_hdr + dir_block_hdr->length +
i * dir_block_hdr->entry_length);
dir_entry = (bnxnvm_directory_entry_t *)(*addr);
if ((dir_entry->type == BNX_DIR_TYPE_BOOTCODE) ||
(dir_entry->type == BNX_DIR_TYPE_BOOTCODE_2)) {
found = 1;
break;
}
}
if (!found)
return -NV_FW_NOT_FOUND;
*addr = QSPI_BASE_ADDR + dir_entry->item_location;
*size = dir_entry->data_length;
INFO("Found chimp firmware at 0x%lx, size %lu byte(s)\n",
*addr, *size);
return NV_OK;
}
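/*
 * Directory walk illustration for the function above (all offsets are
 * hypothetical, only the arithmetic is taken from the code): with the
 * directory block at QSPI_BASE_ADDR + directory_offset, a header length
 * of 0x18 and an entry_length of 0x18, entry i sits at
 *   (uintptr_t)dir_block_hdr + 0x18 + i * 0x18
 * and the firmware image is then read from
 *   QSPI_BASE_ADDR + dir_entry->item_location
 * for dir_entry->data_length bytes.
 */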
#endif
int bcm_chimp_initiate_fastboot(int fastboot_type)
{
int err;
if ((fastboot_type != CHIMP_FASTBOOT_NITRO_RESET) &&
(fastboot_type <= CHIMP_FASTBOOT_JUMP_DECOMPRESS)) {
CHIMP_DBG("Initiating ChiMP fastboot type %d\n", fastboot_type);
}
/*
* If we are here, M0 did not set up Nitro because the NIC mode
* strap was not present.
*/
err = bcm_chimp_reset_and_initial_setup();
if (err)
return err;
if (fastboot_type > CHIMP_FASTBOOT_JUMP_DECOMPRESS) {
WARN("ChiMP setup deferred\n");
return -1;
}
if (fastboot_type != CHIMP_FASTBOOT_NITRO_RESET) {
if ((fastboot_type == CHIMP_FASTBOOT_JUMP_IN_PLACE) &&
(CHIMP_FB1_ENTRY == 0)) {
ERROR("Missing ESAL entry point for fastboot type 1.\n"
"Fastboot failed\n");
return -1;
}
/*
* TODO: We need to decide how to load the ChiMP firmware.
* This could be SPI, NAND, etc.
* For now we temporarily stick to the SPI load unless
* CHIMPFW_USE_SIDELOAD is defined. Note that for the SPI NVRAM
* image we need to parse the directory to get the image.
* When we load the image from other media there is no need to
* parse, because the firmware image can be placed directly into
* the APE's scratchpad.
* For the sideload method we simply reset the ChiMP, set bpe_reg
* to do fastboot with the type we define, and release it from
* reset so that the ROM loader initiates fastboot immediately.
*/
#ifndef CHIMPFW_USE_SIDELOAD
{
uintptr_t spi_addr;
size_t size;
err = bcm_chimp_find_fw_in_spi(&spi_addr, &size);
if (!err) {
INFO("Loading ChiMP firmware, addr 0x%lx, size %lu byte(s)\n",
spi_addr, size);
bcm_chimp_load_fw_from_spi(spi_addr, size);
} else {
ERROR("Error %d ChiMP firmware not in NVRAM directory!\n",
err);
}
}
#else
INFO("Skip ChiMP QSPI fastboot type %d due to sideload requested\n",
fastboot_type);
#endif
if (!err) {
INFO("Instruct ChiMP to fastboot\n");
bcm_chimp_set_fastboot(fastboot_type);
INFO("Fastboot mode set\n");
}
}
bcm_nitro_chimp_release_reset();
return err;
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <string.h>
#include <lib/mmio.h>
#include <platform_def.h>
#include "bcm_emmc.h"
#include "emmc_chal_types.h"
#include "emmc_chal_sd.h"
#include "emmc_pboot_hal_memory_drv.h"
extern void emmc_soft_reset(void);
#define SD_VDD_WINDOW_1_6_TO_1_7 0x00000010 // 1.6 V to 1.7 Volts
#define SD_VDD_WINDOW_1_7_TO_1_8 0x00000020 // 1.7 V to 1.8 Volts
#define SD_VDD_WINDOW_1_8_TO_1_9 0x00000040 // 1.8 V to 1.9 Volts
#define SD_VDD_WINDOW_1_9_TO_2_0 0x00000080 // 1.9 V to 2.0 Volts
#define SD_VDD_WINDOW_2_0_TO_2_1 0x00000100 // 2.0 V to 2.1 Volts
#define SD_VDD_WINDOW_2_1_TO_2_2 0x00000200 // 2.1 V to 2.2 Volts
#define SD_VDD_WINDOW_2_2_TO_2_3 0x00000400 // 2.2 V to 2.3 Volts
#define SD_VDD_WINDOW_2_3_TO_2_4 0x00000800 // 2.3 V to 2.4 Volts
#define SD_VDD_WINDOW_2_4_TO_2_5 0x00001000 // 2.4 V to 2.5 Volts
#define SD_VDD_WINDOW_2_5_TO_2_6 0x00002000 // 2.5 V to 2.6 Volts
#define SD_VDD_WINDOW_2_6_TO_2_7 0x00004000 // 2.6 V to 2.7 Volts
#define SD_VDD_WINDOW_2_7_TO_2_8 0x00008000 // 2.7 V to 2.8 Volts
#define SD_VDD_WINDOW_2_8_TO_2_9 0x00010000 // 2.8 V to 2.9 Volts
#define SD_VDD_WINDOW_2_9_TO_3_0 0x00020000 // 2.9 V to 3.0 Volts
#define SD_VDD_WINDOW_3_0_TO_3_1 0x00040000 // 3.0 V to 3.1 Volts
#define SD_VDD_WINDOW_3_1_TO_3_2 0x00080000 // 3.1 V to 3.2 Volts
#define SD_VDD_WINDOW_3_2_TO_3_3 0x00100000 // 3.2 V to 3.3 Volts
#define SD_VDD_WINDOW_3_3_TO_3_4 0x00200000 // 3.3 V to 3.4 Volts
#define SD_VDD_WINDOW_3_4_TO_3_5 0x00400000 // 3.4 V to 3.5 Volts
#define SD_VDD_WINDOW_3_5_TO_3_6 0x00800000 // 3.5 V to 3.6 Volts
#define SD_VDD_WINDOW_1_6_TO_2_6 (SD_VDD_WINDOW_1_6_TO_1_7 | \
SD_VDD_WINDOW_1_7_TO_1_8 | \
SD_VDD_WINDOW_1_8_TO_1_9 | \
SD_VDD_WINDOW_1_9_TO_2_0 | \
SD_VDD_WINDOW_2_0_TO_2_1 | \
SD_VDD_WINDOW_2_1_TO_2_2 | \
SD_VDD_WINDOW_2_2_TO_2_3 | \
SD_VDD_WINDOW_2_3_TO_2_4 | \
SD_VDD_WINDOW_2_4_TO_2_5 | \
SD_VDD_WINDOW_2_5_TO_2_6)
#define SD_VDD_WINDOW_2_6_TO_3_2 (SD_VDD_WINDOW_2_6_TO_2_7 | \
SD_VDD_WINDOW_2_7_TO_2_8 | \
SD_VDD_WINDOW_2_8_TO_2_9 | \
SD_VDD_WINDOW_2_9_TO_3_0 | \
SD_VDD_WINDOW_3_0_TO_3_1 | \
SD_VDD_WINDOW_3_1_TO_3_2)
#define SD_VDD_WINDOW_3_2_TO_3_6 (SD_VDD_WINDOW_3_2_TO_3_3 | \
SD_VDD_WINDOW_3_3_TO_3_4 | \
SD_VDD_WINDOW_3_4_TO_3_5 | \
SD_VDD_WINDOW_3_5_TO_3_6)
static int32_t chal_sd_set_power(struct sd_dev *handle,
uint32_t voltage, uint32_t state);
static void chal_sd_set_dma_boundary(struct sd_dev *handle, uint32_t boundary);
static int32_t chal_sd_setup_handler(struct sd_dev *handle,
uint32_t sdBase, uint32_t hostBase);
/*
* Configure host controller power settings
* to match the voltage requirements of the SD card
*/
static int32_t chal_sd_set_power(struct sd_dev *handle,
uint32_t voltage, uint32_t state)
{
int32_t rc, rval = SD_FAIL;
uint32_t time = 0;
if (handle == NULL)
return SD_INVALID_HANDLE;
mmio_clrsetbits_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL_OFFSET,
(SD4_EMMC_TOP_CTRL_SDVSELVDD1_MASK |
SD4_EMMC_TOP_CTRL_SDPWR_MASK),
(voltage << 9));
/*
* Long delay is required here in emulation. Without this, the initial
* commands sent to the eMMC card time out. We don't know if this
* delay is necessary with silicon; leaving it in for safety.
* A delay of 403ms was observed on the emulation system; per the clock
* calculations it is expected to complete within 1ms on chip.
*/
do {
rc = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET);
if ((rc & SD4_EMMC_TOP_INTR_CRDINS_MASK) ==
SD4_EMMC_TOP_INTR_CRDINS_MASK)
break;
mdelay(1);
} while (time++ < EMMC_CARD_DETECT_TIMEOUT_MS);
if (time >= EMMC_CARD_DETECT_TIMEOUT_MS) {
ERROR("EMMC: Card insert event detection timeout\n");
return rval;
}
VERBOSE("EMMC: Card detection delay: %dms\n", time);
if (state)
mmio_setbits_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL_OFFSET,
SD4_EMMC_TOP_CTRL_SDPWR_MASK);
/* dummy write & ack to verify if the sdio is ready to send commands */
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_ARG_OFFSET, 0);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CMD_OFFSET, 0);
/*
* 63ms observed on the emulation system; as per clock calculations
* it will complete < 1ms on chip.
*/
time = 0;
do {
rc = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET);
if (rc & SD4_EMMC_TOP_INTR_ERRIRQ_MASK)
break;
if ((rc & SD4_EMMC_TOP_INTR_CMDDONE_MASK) ==
SD4_EMMC_TOP_INTR_CMDDONE_MASK)
break;
mdelay(1);
} while (time++ < EMMC_CMD_TIMEOUT_MS);
if (time >= EMMC_CMD_TIMEOUT_MS) {
WARN("%s %d Initial dummy command timeout is happened\n",
__func__, __LINE__);
return rval;
}
VERBOSE("EMMC: Dummy Command delay: %dms\n", time);
return SD_OK;
}
/*
* Configure DMA Boundaries
*/
static void chal_sd_set_dma_boundary(struct sd_dev *handle, uint32_t boundary)
{
if (handle == NULL)
return;
mmio_clrsetbits_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_BLOCK_OFFSET,
SD4_EMMC_TOP_BLOCK_HSBS_MASK, boundary);
}
static int32_t chal_sd_setup_handler(struct sd_dev *handle, uint32_t sdBase,
uint32_t hostBase)
{
if (handle == NULL)
return SD_INVALID_HANDLE;
handle->ctrl.sdRegBaseAddr = sdBase;
handle->ctrl.hostRegBaseAddr = hostBase;
handle->ctrl.present = 0;
handle->ctrl.rca = 0;
handle->ctrl.blkGapEnable = 0;
handle->ctrl.cmdStatus = 0;
return SD_OK;
}
/*
* Initialize SD Host controller
*/
int32_t chal_sd_init(CHAL_HANDLE *sd_handle)
{
uint32_t cap_val_l = 0;
uint32_t ctl_val, voltage;
uint32_t timeout_val;
struct sd_dev *handle;
uint32_t reg_val;
int32_t rval = SD_FAIL;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *)sd_handle;
/*
* Set SDIO Host Controller capabilities register
*/
EMMC_TRACE("Set Host Controller Capabilities register\n");
reg_val = 0;
reg_val |= (1 << ICFG_SDIO0_CAP0__SLOT_TYPE_R);
reg_val |= (0 << ICFG_SDIO0_CAP0__INT_MODE_R);
reg_val |= (0 << ICFG_SDIO0_CAP0__SYS_BUS_64BIT_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__VOLTAGE_1P8V_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__VOLTAGE_3P0V_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__VOLTAGE_3P3V_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__SUSPEND_RESUME_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__SDMA_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__HIGH_SPEED_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__ADMA2_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__EXTENDED_MEDIA_R);
reg_val |= (2 << ICFG_SDIO0_CAP0__MAX_BLOCK_LEN_R);
reg_val |= (0xd0 << ICFG_SDIO0_CAP0__BASE_CLK_FREQ_R);
reg_val |= (1 << ICFG_SDIO0_CAP0__TIMEOUT_UNIT_R);
reg_val |= (0x30 << ICFG_SDIO0_CAP0__TIMEOUT_CLK_FREQ_R);
mmio_write_32(ICFG_SDIO0_CAP0, reg_val);
reg_val = 0;
reg_val |= (1 << ICFG_SDIO0_CAP1__SPI_BLOCK_MODE_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__SPI_MODE_R);
reg_val |= (0 << ICFG_SDIO0_CAP1__CLK_MULT_R);
reg_val |= (0 << ICFG_SDIO0_CAP1__RETUNING_MODE_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__TUNE_SDR50_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__TIME_RETUNE_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__DRIVER_D_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__DRIVER_C_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__DRIVER_A_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__DDR50_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__SDR104_R);
reg_val |= (1 << ICFG_SDIO0_CAP1__SDR50_R);
mmio_write_32(ICFG_SDIO0_CAP1, reg_val);
/* Reset the SDIO controller */
chal_sd_stop();
/* Turn on SD clock */
chal_sd_set_clock(sd_handle,
chal_sd_freq_2_div_ctrl_setting(INIT_CLK_FREQ), 1);
/* program data time out value to the max */
timeout_val = SD_HOST_CORE_TIMEOUT;
ctl_val = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL1_OFFSET);
ctl_val |= ((timeout_val & 0xf) << SD4_EMMC_TOP_CTRL1_DTCNT_SHIFT);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL1_OFFSET,
ctl_val);
/* enable all interrupt status */
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_INTREN1_OFFSET,
0);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_INTREN2_OFFSET,
0);
SD_US_DELAY(100);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_INTREN1_OFFSET,
SD_NOR_INTERRUPTS | SD_ERR_INTERRUPTS);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_INTREN2_OFFSET,
SD_NOR_INTERRUPTS | SD_ERR_INTERRUPTS);
/* Select SD bus voltage */
cap_val_l = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CAPABILITIES1_OFFSET);
handle->cfg.voltage = 0;
voltage = 0x7;
if (cap_val_l & SD4_EMMC_TOP_CAPABILITIES1_V33_MASK) {
handle->cfg.voltage |= SD_VDD_WINDOW_3_3_TO_3_4;
voltage = 0x7;
} else if (cap_val_l & SD4_EMMC_TOP_CAPABILITIES1_V3_MASK) {
handle->cfg.voltage |= SD_VDD_WINDOW_3_0_TO_3_1;
voltage = 0x6;
} else if (cap_val_l & SD4_EMMC_TOP_CAPABILITIES1_V18_MASK) {
handle->cfg.voltage |= SD_VDD_WINDOW_1_8_TO_1_9;
voltage = 0x5;
}
rval = chal_sd_set_power(handle, voltage, SD4_EMMC_TOP_CTRL_SDPWR_MASK);
ctl_val = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_HCVERSIRQ_OFFSET);
handle->ctrl.version = ((ctl_val >> 16) & 0xFF);
return rval;
}
void chal_sd_set_speed(CHAL_HANDLE *sd_handle, uint32_t speed)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return;
handle = (struct sd_dev *) sd_handle;
if (speed) {
EMMC_TRACE("enable HighSpeed\n");
mmio_setbits_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL_OFFSET,
SD4_EMMC_TOP_CTRL_HSEN_MASK);
} else {
EMMC_TRACE("disable HighSpeed\n");
mmio_clrbits_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL_OFFSET,
SD4_EMMC_TOP_CTRL_HSEN_MASK);
}
}
int32_t chal_sd_stop(void)
{
uintptr_t idm_rst_ctrl_addr = EMMC_IDM_RESET_CTRL_ADDR;
/* Configure IO pins */
emmc_soft_reset();
/* Reset the SDIO controller */
mmio_write_32(idm_rst_ctrl_addr, 1);
SD_US_DELAY(100);
mmio_write_32(idm_rst_ctrl_addr, 0);
SD_US_DELAY(100);
return SD_OK;
}
/*
* Check if the host supports the specified capability.
* Returns a negative value on error, 0 if the capability is not supported, else 1.
*/
int32_t chal_sd_check_cap(CHAL_HANDLE *sd_handle, uint32_t caps)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
if (caps & mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CAPABILITIES1_OFFSET))
return 1;
else
return 0;
}
int32_t chal_sd_start(CHAL_HANDLE *sd_handle,
uint32_t mode, uint32_t sd_base, uint32_t host_base)
{
struct sd_dev *handle;
int32_t rval = SD_FAIL;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
handle->cfg.mode = SD_PIO_MODE; /* set to PIO mode first for init */
handle->cfg.dma = SD_DMA_OFF;
chal_sd_setup_handler(handle, sd_base, host_base);
/* init and start hw */
rval = chal_sd_init(sd_handle);
if (rval != SD_OK)
return rval;
chal_sd_clear_pending_irq(sd_handle);
handle->ctrl.eventList = 0;
handle->cfg.mode = mode;
return SD_OK;
}
/*
* Function to check the 8 bits of error status generated by auto CMD12
*/
int32_t chal_sd_get_atuo12_error(CHAL_HANDLE *sd_handle)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
return (mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_ERRSTAT_OFFSET) & 0xFF);
}
/*
* Read present state register
*/
uint32_t chal_sd_get_present_status(CHAL_HANDLE *sd_handle)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
return mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_PSTATE_OFFSET);
}
/*
* Set SD bus width
*/
int32_t chal_sd_config_bus_width(CHAL_HANDLE *sd_handle, int32_t width)
{
uint32_t ctl_val;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *)sd_handle;
ctl_val = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL_OFFSET);
switch (width) {
#ifdef DRIVER_EMMC_ENABLE_DATA_WIDTH_8BIT
case SD_BUS_DATA_WIDTH_8BIT:
ctl_val &= ~SD_BUS_DATA_WIDTH_4BIT;
ctl_val |= SD_BUS_DATA_WIDTH_8BIT;
break;
#endif
case SD_BUS_DATA_WIDTH_4BIT:
ctl_val &= ~SD_BUS_DATA_WIDTH_8BIT;
ctl_val |= SD_BUS_DATA_WIDTH_4BIT;
break;
case SD_BUS_DATA_WIDTH_1BIT:
ctl_val &= ~(SD_BUS_DATA_WIDTH_4BIT | SD_BUS_DATA_WIDTH_8BIT);
break;
default:
return SD_INV_DATA_WIDTH;
};
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL_OFFSET,
ctl_val);
return SD_OK;
}
/*
* Function to enable or disable DMA control.
*/
int32_t chal_sd_set_dma(CHAL_HANDLE *sd_handle, uint32_t mode)
{
uint32_t val;
struct sd_dev *handle;
int32_t rc;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *)sd_handle;
if (mode) {
rc = chal_sd_check_cap(sd_handle,
SD4_EMMC_TOP_CAPABILITIES1_SDMA_MASK |
SD4_EMMC_TOP_CAPABILITIES1_ADMA2_MASK);
if (rc < 0)
return rc;
if (rc) {
handle->cfg.dma = mode;
val = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL_OFFSET);
val &= ~(SD4_EMMC_TOP_CTRL_DMASEL_MASK);
val |= handle->cfg.dma - 1;
mmio_write_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL_OFFSET, val);
return SD_OK;
}
}
handle->cfg.dma = 0;
return SD_FAIL;
}
/*
* Get current DMA address.
* Called only when there is no data transaction activity.
*/
uintptr_t chal_sd_get_dma_addr(CHAL_HANDLE *sd_handle)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
if (handle->cfg.dma == SD_DMA_OFF)
return 0;
return (uintptr_t)mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_SYSADDR_OFFSET);
}
int32_t chal_sd_send_cmd(CHAL_HANDLE *sd_handle, uint32_t cmd_idx,
uint32_t argument, uint32_t options)
{
uint32_t cmd_mode_reg = 0;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
EMMC_TRACE("%s %d cmd:%d argReg:%x options:%x\n",
__func__, __LINE__, cmd_idx, argument, options);
/* Configure the value for command and mode registers */
cmd_mode_reg = (cmd_idx << 24) | options;
/*
* 1. Write block size reg & block count reg,
* this is done in the tx or rx setup
*/
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_BLOCK_OFFSET,
handle->ctrl.blkReg);
/* 2. Write argument reg */
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_ARG_OFFSET,
argument);
handle->ctrl.argReg = argument;
/*
* 3. Write transfer mode reg & command reg, check the DMA bit which is
* set before this function call if it is selected.
*/
if (cmd_idx == 24 || cmd_idx == 25 || cmd_idx == 18 || cmd_idx == 17 ||
cmd_idx == 42 || cmd_idx == 51 || cmd_idx == 53)
cmd_mode_reg |= ((handle->cfg.dma) ? 1 : 0);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CMD_OFFSET,
cmd_mode_reg);
handle->ctrl.cmdIndex = cmd_idx;
return SD_OK;
}
int32_t chal_sd_set_dma_addr(CHAL_HANDLE *sd_handle, uintptr_t address)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
if (handle->cfg.dma == SD_DMA_OFF)
return SD_FAIL;
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_SYSADDR_OFFSET,
address);
return SD_OK;
}
uint32_t chal_sd_freq_2_div_ctrl_setting(uint32_t desired_freq)
{
/*
* Divider control setting represents 1/2 of the actual divider value.
*
* DesiredFreq = BaseClockFreq / (2 * div_ctrl_setting)
*
* ==> div_ctrl_setting = BaseClockFreq / (2 * DesiredFreq)
*/
uint32_t div_ctrl_setting;
uint32_t actual_freq;
assert(desired_freq != 0);
/* Special case, 0 = divider of 1. */
if (desired_freq >= BASE_CLK_FREQ)
return 0;
/* Normal case, desired_freq < BASE_CLK_FREQ */
div_ctrl_setting = BASE_CLK_FREQ / (2 * desired_freq);
actual_freq = BASE_CLK_FREQ / (2 * div_ctrl_setting);
if (actual_freq > desired_freq) {
/*
* Division does not result in an exact frequency match.
* Make sure resulting frequency does not exceed requested freq.
*/
div_ctrl_setting++;
}
return div_ctrl_setting;
}
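/*
 * Worked example for the divider calculation above (illustrative only;
 * the actual BASE_CLK_FREQ value is defined elsewhere in the driver and is
 * assumed to be 200 MHz here):
 *
 *   desired_freq     = 26 MHz
 *   div_ctrl_setting = 200000000 / (2 * 26000000) = 3   (integer division)
 *   actual_freq      = 200000000 / (2 * 3)        = 33.3 MHz  > 26 MHz
 *   -> increment div_ctrl_setting to 4
 *   actual_freq      = 200000000 / (2 * 4)        = 25 MHz    <= 26 MHz
 */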
int32_t chal_sd_set_clock(CHAL_HANDLE *sd_handle, uint32_t div_ctrl_setting,
uint32_t on)
{
uint32_t value;
struct sd_dev *handle;
uint32_t time;
uint32_t clk_sel_high_byte = 0xFF & (div_ctrl_setting >> 8);
uint32_t clk_sel_low_byte = 0xFF & div_ctrl_setting;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
EMMC_TRACE("set_clock(div_ctrl_setting=%d,on=%d)\n",
div_ctrl_setting, on);
handle = (struct sd_dev *) sd_handle;
/* Read control register content. */
value = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL1_OFFSET);
/* Disable Clock */
value &= ~(SD4_EMMC_TOP_CTRL1_SDCLKEN_MASK);
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL1_OFFSET,
value);
/* Clear bits of interest. */
value &= ~(SD4_EMMC_TOP_CTRL1_SDCLKSEL_MASK |
SD4_EMMC_TOP_CTRL1_SDCLKSEL_UP_MASK);
/* Set bits of interest to new value. */
value |= (SD4_EMMC_TOP_CTRL1_SDCLKSEL_MASK &
(clk_sel_low_byte << SD4_EMMC_TOP_CTRL1_SDCLKSEL_SHIFT));
value |= (SD4_EMMC_TOP_CTRL1_SDCLKSEL_UP_MASK &
(clk_sel_high_byte << SD4_EMMC_TOP_CTRL1_SDCLKSEL_UP_SHIFT));
value |= SD4_EMMC_TOP_CTRL1_ICLKEN_MASK;
/* Write updated value back to control register. */
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL1_OFFSET,
value);
time = 0;
do {
value = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL1_OFFSET);
if ((value & SD4_EMMC_TOP_CTRL1_ICLKSTB_MASK) ==
SD4_EMMC_TOP_CTRL1_ICLKSTB_MASK)
break;
mdelay(1);
} while (time++ < EMMC_CLOCK_SETTING_TIMEOUT_MS);
if (time >= EMMC_CLOCK_SETTING_TIMEOUT_MS)
WARN("%s %d clock settings timeout happenedi (%dms)\n",
__func__, __LINE__, time);
VERBOSE("EMMC: clock settings delay: %dms\n", time);
value = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL1_OFFSET);
if (on)
value |= SD4_EMMC_TOP_CTRL1_SDCLKEN_MASK;
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL1_OFFSET,
value);
return SD_OK;
}
/*
* Function to set up the DMA buffer and data length; it calculates the
* block size and the number of blocks to be transferred and returns
* the DMA buffer address.
*/
int32_t chal_sd_setup_xfer(CHAL_HANDLE *sd_handle,
uint8_t *data, uint32_t length, int32_t dir)
{
uint32_t blocks = 0;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
if (length <= handle->cfg.blockSize) {
handle->ctrl.blkReg = length | handle->cfg.dmaBoundary;
} else {
blocks = length / handle->cfg.blockSize;
handle->ctrl.blkReg = (blocks << 16) | handle->cfg.blockSize |
handle->cfg.dmaBoundary;
}
if (handle->cfg.dma != SD_DMA_OFF) {
/* For DMA target address setting, physical address should be used */
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_SYSADDR_OFFSET,
(uintptr_t)data);
}
return SD_OK;
}
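/*
 * Example of the block register packing done above (values are purely
 * illustrative): for a 4096-byte read with cfg.blockSize = 512 and a
 * dmaBoundary field of 0, blocks = 4096 / 512 = 8, so
 *   blkReg = (8 << 16) | 512 = 0x00080200
 * i.e. the block count lives in the upper half-word and the block size
 * (plus the DMA boundary bits) in the lower half-word.
 */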
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
/*
* Function to write one block of data directly to the
* host controller's FIFO, which is 1KB or
* 2KB in size.
* It is used in non-DMA mode for data transmission.
*/
int32_t chal_sd_write_buffer(CHAL_HANDLE *sd_handle, uint32_t length,
uint8_t *data)
{
uint32_t i, leftOver = 0, blockSize, size, value = 0;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
blockSize = handle->cfg.blockSize;
if (length == 0)
return SD_OK;
/* PIO mode, push into fifo word by word */
if (length >= blockSize) {
size = blockSize;
} else {
size = ((length >> 2) << 2);
leftOver = length % 4;
}
for (i = 0; i < size; i += 4) {
value = *(uint32_t *)(data + i);
mmio_write_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_BUFDAT_OFFSET, value);
}
/*
* BUG ALERT:
* This implementation has TWO issues that must be addressed before you
* can safely INCLUDE_EMMC_DRIVER_WRITE_CODE.
*
* (1) For the last leftOver bytes, the driver writes a full word, which
* means some of the eMMC content (i.e. "4 - leftOver" bytes) will be
* erroneously overwritten.
* (2) eMMC is a block device. What happens when less than a full block of
* data is submitted???
*/
if (leftOver > 0) {
value = ((*(uint32_t *)(data + i)) << (4 - leftOver));
mmio_write_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_BUFDAT_OFFSET, value);
}
return SD_OK;
}
#endif /* INCLUDE_EMMC_DRIVER_WRITE_CODE */
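/*
 * A possible way to address issue (1) noted above, sketched here only and
 * not verified on hardware: zero-pad the final partial word instead of
 * shifting it, so the bytes beyond 'leftOver' go out as zeros rather than
 * as shifted payload bytes:
 *
 *   if (leftOver > 0) {
 *       value = 0;
 *       memcpy(&value, data + i, leftOver);
 *       mmio_write_32(handle->ctrl.sdRegBaseAddr +
 *                     SD4_EMMC_TOP_BUFDAT_OFFSET, value);
 *   }
 *
 * Issue (2), writing less than a full block to a block device, is not
 * addressed by this sketch and would need handling at a higher layer.
 */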
/*
* Function to read at most one block of data directly
* from the data port of the host controller (FIFO). It is used
* in non-DMA mode for data transmission.
*/
int32_t chal_sd_read_buffer(CHAL_HANDLE *sd_handle, uint32_t length,
uint8_t *data)
{
uint32_t i, size, leftOver, blockSize, value;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *)sd_handle;
value = 0;
blockSize = handle->cfg.blockSize;
/* PIO mode, extract fifo word by word */
if (length >= blockSize) {
size = blockSize;
leftOver = 0;
} else {
leftOver = length % 4;
size = ((length >> 2) << 2);
}
for (i = 0; i < size; i += 4) {
value =
mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_BUFDAT_OFFSET);
memcpy((void *)(data + i), &value, sizeof(uint32_t));
}
if (leftOver > 0) {
value = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_BUFDAT_OFFSET);
/*
* Copy remaining non-full word bytes.
* (We run ARM as Little Endian)
*/
uint8_t j = 0;
for (j = 0; j < leftOver; j++) {
data[i + j] = (value >> (j * 8)) & 0xFF;
}
}
return SD_OK;
}
/*
* Resets the DAT and/or CMD line.
*/
int32_t chal_sd_reset_line(CHAL_HANDLE *sd_handle, uint32_t line)
{
uint32_t control, flag;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
flag = SD4_EMMC_TOP_CTRL1_CMDRST_MASK | SD4_EMMC_TOP_CTRL1_DATRST_MASK;
if (flag != (line | flag))
return SD_FAIL;
control = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL1_OFFSET);
control |= line;
mmio_write_32(handle->ctrl.sdRegBaseAddr + SD4_EMMC_TOP_CTRL1_OFFSET,
control);
/* Resetting the CMD and DATA lines should always work; no timeout is needed. */
do {
control = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_CTRL1_OFFSET);
} while (control & line);
return SD_OK;
}
/*
* Function to be called once an SD command is done to read
* back its response data.
*/
int32_t chal_sd_get_response(CHAL_HANDLE *sd_handle, uint32_t *resp)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
resp[0] = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_RESP0_OFFSET);
resp[1] = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_RESP2_OFFSET);
resp[2] = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_RESP4_OFFSET);
resp[3] = mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_RESP6_OFFSET);
return SD_OK;
}
/*
* The function is called to clear all pending interrupts.
*/
int32_t chal_sd_clear_pending_irq(CHAL_HANDLE *sd_handle)
{
uint32_t status = SD_OK;
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *)sd_handle;
/* Make sure all interrupts are cleared */
do {
mmio_write_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET, 0xFFFFFFFF);
SD_US_DELAY(10);
} while (mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET));
return status;
}
/*
* The function returns the interrupt status register value.
*/
int32_t chal_sd_get_irq_status(CHAL_HANDLE *sd_handle)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
return (mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET));
}
/*
* The function clears interrupt(s) specified in the mask.
*/
int32_t chal_sd_clear_irq(CHAL_HANDLE *sd_handle, uint32_t mask)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
/* Make sure the masked interrupts are cleared */
do {
mmio_write_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET, mask);
SD_US_DELAY(10);
} while (mask &
mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTR_OFFSET));
return SD_OK;
}
/*
* Description: The function configures the SD host controller.
*/
int32_t chal_sd_config(CHAL_HANDLE *sd_handle, uint32_t speed, uint32_t retry,
uint32_t boundary, uint32_t blkSize, uint32_t dma)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return SD_INVALID_HANDLE;
handle = (struct sd_dev *) sd_handle;
handle->cfg.speedMode = speed;
handle->cfg.retryLimit = retry;
handle->cfg.dmaBoundary = boundary;
handle->cfg.blockSize = blkSize;
chal_sd_set_dma(sd_handle, dma);
SD_US_DELAY(100);
chal_sd_set_dma_boundary(handle, boundary);
SD_US_DELAY(100);
chal_sd_set_speed(sd_handle, speed);
SD_US_DELAY(100);
return SD_OK;
}
/*
* Cleans up HC FIFO.
*/
void chal_sd_dump_fifo(CHAL_HANDLE *sd_handle)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return;
handle = (struct sd_dev *)sd_handle;
/* in case there is still data in the host buffer */
while (mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_PSTATE_OFFSET) & 0x800) {
mmio_read_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_BUFDAT_OFFSET);
};
}
/*
* Enable or disable an SD interrupt signal.
*/
void chal_sd_set_irq_signal(CHAL_HANDLE *sd_handle, uint32_t mask,
uint32_t state)
{
struct sd_dev *handle;
if (sd_handle == NULL)
return;
handle = (struct sd_dev *)sd_handle;
if (state)
mmio_setbits_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTREN2_OFFSET, mask);
else
mmio_clrbits_32(handle->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_INTREN2_OFFSET, mask);
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <arch_helpers.h>
#include <lib/mmio.h>
#include "bcm_emmc.h"
#include "emmc_chal_types.h"
#include "emmc_csl_sdprot.h"
#include "emmc_chal_sd.h"
#include "emmc_csl_sdcmd.h"
#include "emmc_csl_sd.h"
#include "emmc_pboot_hal_memory_drv.h"
#define SD_CARD_BUSY 0x80000000
#define SD_CARD_RETRY_LIMIT 1000
#define SD_CARD_HIGH_SPEED_PS 13
#define SD_CHK_HIGH_SPEED_MODE 0x00FFFFF1
#define SD_SET_HIGH_SPEED_MODE 0x80FFFFF1
#define SD_MMC_ENABLE_HIGH_SPEED 0x03b90100 //0x03b90103
#define SD_MMC_8BIT_MODE 0x03b70200
#define SD_MMC_4BIT_MODE 0x03b70100
#define SD_MMC_1BIT_MODE 0x03b70000
#define SD_MMC_BOOT_8BIT_MODE 0x03b10200
#define SD_MMC_BOOT_4BIT_MODE 0x03b10100
#define SD_MMC_BOOT_1BIT_MODE 0x03b10000
#define SDIO_HW_EMMC_EXT_CSD_BOOT_CNF 0X03B30000
#ifdef USE_EMMC_FIP_TOC_CACHE
/*
* Cache size mirrors the size of the global eMMC temp buffer
* which is used for non-image body reads such as headers, ToC etc.
*/
#define CACHE_SIZE ((EMMC_BLOCK_SIZE) * 2)
#define PARTITION_BLOCK_ADDR ((PLAT_FIP_ATTEMPT_OFFSET)/(EMMC_BLOCK_SIZE))
static uint32_t cached_partition_block;
static uint8_t cached_block[CACHE_SIZE];
#endif
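/*
 * Note on the cache geometry above (numbers are illustrative only; the
 * real values of PLAT_FIP_ATTEMPT_OFFSET and EMMC_BLOCK_SIZE come from
 * the platform headers): with EMMC_BLOCK_SIZE = 512 the cache holds 1KB,
 * i.e. the two blocks that carry the FIP ToC. If PLAT_FIP_ATTEMPT_OFFSET
 * were 0x20000, PARTITION_BLOCK_ADDR would be 0x20000 / 512 = 256, so any
 * 1KB read starting at block 256 is served from cached_block instead of
 * re-reading the eMMC.
 */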
static int set_card_data_width(struct sd_handle *handle, int width);
static int abort_err(struct sd_handle *handle);
static int err_recovery(struct sd_handle *handle, uint32_t errors);
static int xfer_data(struct sd_handle *handle, uint32_t mode, uint32_t addr,
uint32_t length, uint8_t *base);
int set_boot_config(struct sd_handle *handle, uint32_t config)
{
return mmc_cmd6(handle, SDIO_HW_EMMC_EXT_CSD_BOOT_CNF | config);
}
void process_csd_mmc_speed(struct sd_handle *handle, uint32_t csd_mmc_speed)
{
uint32_t div_ctrl_setting;
/* CSD field TRAN_SPEED:
* Bits [2:0] 0 = 100 KHz
* 1 = 1 MHz
* 2 = 10 MHz
* 3 = 100 MHz
* 4...7 Reserved.
* Bits [6:3] 0 = Reserved
* 1 = 1.0
* 2 = 1.2
* 3 = 1.3
* 4 = 1.5
* 5 = 2.0
* 6 = 2.6
* 7 = 3.0
* 8 = 3.5
* 9 = 4.0
* A = 4.5
* B = 5.2
* C = 5.5
* D = 6.0
* E = 7.0
* F = 8.0
* For cards supporting version 4.0, 4.1, and 4.2 of the standard,
* the value shall be 20 MHz (0x2A).
* For cards supporting version 4.3 , the value shall be 26 MHz (0x32)
*/
switch (csd_mmc_speed & 0x7F) {
case 0x2A:
EMMC_TRACE("Speeding up eMMC clock to 20MHz\n");
div_ctrl_setting =
chal_sd_freq_2_div_ctrl_setting(20 * 1000 * 1000);
break;
case 0x32:
EMMC_TRACE("Speeding up eMMC clock to 26MHz\n");
div_ctrl_setting =
chal_sd_freq_2_div_ctrl_setting(26 * 1000 * 1000);
break;
default:
/* Unknown */
return;
}
chal_sd_set_clock((CHAL_HANDLE *) handle->device, div_ctrl_setting, 0);
chal_sd_set_clock((CHAL_HANDLE *) handle->device, div_ctrl_setting, 1);
SD_US_DELAY(1000);
}
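/*
 * Worked decoding example for the TRAN_SPEED values handled above:
 * 0x32 = 0b0110010 -> bits [2:0] = 2 (10 MHz unit), bits [6:3] = 6
 * (multiplier 2.6), giving 2.6 * 10 MHz = 26 MHz, i.e. the eMMC 4.3 case.
 * Likewise 0x2A -> bits [2:0] = 2, bits [6:3] = 5 (2.0), i.e. 20 MHz.
 */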
/*
* The function changes the SD/SDIO/MMC card data width if
* the card supports configurable data width. The host controller
* and the card have to use the same bus data width.
*/
int set_card_data_width(struct sd_handle *handle, int width)
{
uint32_t data_width = 0;
int is_valid_arg = 1;
int rc = SD_FAIL;
char *bitwidth_str = " ";
char *result_str = "failed";
switch (width) {
#ifdef DRIVER_EMMC_ENABLE_DATA_WIDTH_8BIT
case SD_BUS_DATA_WIDTH_8BIT:
data_width = SD_MMC_8BIT_MODE;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
bitwidth_str = "8_BIT";
#endif
break;
#endif
case SD_BUS_DATA_WIDTH_4BIT:
data_width = SD_MMC_4BIT_MODE;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
bitwidth_str = "4_BIT";
#endif
break;
case SD_BUS_DATA_WIDTH_1BIT:
data_width = SD_MMC_1BIT_MODE;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
bitwidth_str = "1_BIT";
#endif
break;
default:
is_valid_arg = 0;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
bitwidth_str = "unknown";
#endif
break;
}
if (is_valid_arg) {
rc = mmc_cmd6(handle, data_width);
if (rc == SD_OK) {
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
result_str = "succeeded";
#endif
chal_sd_config_bus_width((CHAL_HANDLE *) handle->device,
width);
} else {
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
result_str = "failed";
#endif
}
} else {
rc = SD_FAIL;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
result_str = "ignored";
#endif
}
VERBOSE("SDIO Data Width(%s) %s.\n", bitwidth_str, result_str);
return rc;
}
/*
* Error handling routine. Aborts data
* transmission if an error is found.
*/
static int abort_err(struct sd_handle *handle)
{
uint32_t present, options, event, rel = 0;
struct sd_resp cmdRsp;
handle->device->ctrl.argReg = 0;
handle->device->ctrl.cmdIndex = SD_CMD_STOP_TRANSMISSION;
options = (SD_CMD_STOP_TRANSMISSION << 24) |
(SD_CMDR_RSP_TYPE_R1b_5b << SD_CMDR_RSP_TYPE_S) |
SD4_EMMC_TOP_CMD_CRC_EN_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
chal_sd_send_cmd((CHAL_HANDLE *) handle->device,
handle->device->ctrl.cmdIndex,
handle->device->ctrl.argReg, options);
event = wait_for_event(handle,
SD4_EMMC_TOP_INTR_CMDDONE_MASK |
SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (event & SD_CMD_ERROR_INT) {
rel = SD_ERROR_NON_RECOVERABLE;
} else {
if (event & SD_DAT_TIMEOUT) {
return SD_ERROR_NON_RECOVERABLE;
}
chal_sd_get_response((CHAL_HANDLE *) handle->device,
(uint32_t *)&cmdRsp);
process_cmd_response(handle, handle->device->ctrl.cmdIndex,
cmdRsp.data.r2.rsp1, cmdRsp.data.r2.rsp2,
cmdRsp.data.r2.rsp3, cmdRsp.data.r2.rsp4,
&cmdRsp);
SD_US_DELAY(2000);
present =
chal_sd_get_present_status((CHAL_HANDLE *) handle->device);
if ((present & 0x00F00000) == 0x00F00000)
rel = SD_ERROR_RECOVERABLE;
else
rel = SD_ERROR_NON_RECOVERABLE;
}
return rel;
}
/*
* The function handles the actual data transmission in both DMA and
* non-DMA modes. In non-DMA mode the data transfer starts
* when the command is sent to the card; data has to be written
* into the host controller's buffer at this point, one block
* at a time.
* In DMA mode, the actual data transfer is done by the DMA engine
* and this function just waits for the data transfer to complete.
*
*/
int process_data_xfer(struct sd_handle *handle, uint8_t *buffer, uint32_t addr,
uint32_t length, int dir)
{
if (dir == SD_XFER_HOST_TO_CARD) {
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
if (handle->device->cfg.dma == SD_DMA_OFF) {
/*
* In NON DMA mode, the real data xfer starts from here
*/
if (write_buffer(handle, length, buffer))
return SD_WRITE_ERROR;
} else {
wait_for_event(handle,
SD4_EMMC_TOP_INTR_TXDONE_MASK |
SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus == SD_OK)
return SD_OK;
check_error(handle, handle->device->ctrl.cmdStatus);
return SD_WRITE_ERROR;
}
#else
return SD_WRITE_ERROR;
#endif
} else { /* SD_XFER_CARD_TO_HOST */
if (handle->device->cfg.dma == SD_DMA_OFF) {
/* In NON DMA mode, the real data
* transfer starts from here
*/
if (read_buffer(handle, length, buffer))
return SD_READ_ERROR;
} else { /* for DMA mode */
/*
* once the data transmission is done
* copy data to the host buffer.
*/
wait_for_event(handle,
SD4_EMMC_TOP_INTR_TXDONE_MASK |
SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus == SD_OK)
return SD_OK;
check_error(handle, handle->device->ctrl.cmdStatus);
return SD_READ_ERROR;
}
}
return SD_OK;
}
/*
* The function sets block size for the next SD/SDIO/MMC
* card read/write command.
*/
int select_blk_sz(struct sd_handle *handle, uint16_t size)
{
return sd_cmd16(handle, size);
}
/*
* The function initializes the SD/SDIO/MMC/CEATA card and detects
* it according to the detection flag.
* Once this function is called, the card is put into the ready state
* so the application can transfer data to and from the card.
*/
int init_card(struct sd_handle *handle, int detection)
{
/*
* After Reset, eMMC comes up in 1 Bit Data Width by default.
* Set host side to match.
*/
chal_sd_config_bus_width((CHAL_HANDLE *) handle->device,
SD_BUS_DATA_WIDTH_1BIT);
#ifdef USE_EMMC_FIP_TOC_CACHE
cached_partition_block = 0;
#endif
handle->device->ctrl.present = 0; /* init card present to be no card */
init_mmc_card(handle);
handle->device->ctrl.present = 1; /* card is detected */
/* switch the data width back */
if (handle->card->type != SD_CARD_MMC)
return SD_FAIL;
/*
* Dynamically set Data Width to highest supported value.
* Try different data width settings (highest to lowest).
* Verify each setting by reading EXT_CSD and comparing
* against the EXT_CSD contents previously read in call to
* init_mmc_card() earlier. Stop at first verified data width
* setting.
*/
{
#define EXT_CSD_PROPERTIES_SECTION_START_INDEX 192
#define EXT_CSD_PROPERTIES_SECTION_END_INDEX 511
uint8_t buffer[EXT_CSD_SIZE];
#ifdef DRIVER_EMMC_ENABLE_DATA_WIDTH_8BIT
/* Try 8 Bit Data Width */
chal_sd_config_bus_width((CHAL_HANDLE *) handle->device,
SD_BUS_DATA_WIDTH_8BIT);
if ((!set_card_data_width(handle, SD_BUS_DATA_WIDTH_8BIT)) &&
(!mmc_cmd8(handle, buffer)) &&
(!memcmp(&buffer[EXT_CSD_PROPERTIES_SECTION_START_INDEX],
&(emmc_global_buf_ptr->u.Ext_CSD_storage[EXT_CSD_PROPERTIES_SECTION_START_INDEX]),
EXT_CSD_PROPERTIES_SECTION_END_INDEX - EXT_CSD_PROPERTIES_SECTION_START_INDEX + 1)))
return SD_OK;
#endif
/* Fall back to 4 Bit Data Width */
chal_sd_config_bus_width((CHAL_HANDLE *) handle->device,
SD_BUS_DATA_WIDTH_4BIT);
if ((!set_card_data_width(handle, SD_BUS_DATA_WIDTH_4BIT)) &&
(!mmc_cmd8(handle, buffer)) &&
(!memcmp(&buffer[EXT_CSD_PROPERTIES_SECTION_START_INDEX],
&(emmc_global_buf_ptr->u.Ext_CSD_storage[EXT_CSD_PROPERTIES_SECTION_START_INDEX]),
EXT_CSD_PROPERTIES_SECTION_END_INDEX - EXT_CSD_PROPERTIES_SECTION_START_INDEX + 1)))
return SD_OK;
/* Fall back to 1 Bit Data Width */
chal_sd_config_bus_width((CHAL_HANDLE *) handle->device,
SD_BUS_DATA_WIDTH_1BIT);
/* Just use 1 Bit Data Width then. */
if (!set_card_data_width(handle, SD_BUS_DATA_WIDTH_1BIT))
return SD_OK;
}
return SD_CARD_INIT_ERROR;
}
/*
* The function handles MMC/CEATA card initialization.
*/
int init_mmc_card(struct sd_handle *handle)
{
uint32_t ocr = 0, newOcr, rc, limit = 0;
uint32_t cmd1_option = 0x40300000;
uint32_t sec_count;
handle->card->type = SD_CARD_MMC;
do {
SD_US_DELAY(1000);
newOcr = 0;
ocr = 0;
rc = sd_cmd1(handle, cmd1_option, &newOcr);
limit++;
if (rc == SD_OK)
ocr = newOcr;
} while (((ocr & SD_CARD_BUSY) == 0) && (limit < SD_CARD_RETRY_LIMIT));
if (limit >= SD_CARD_RETRY_LIMIT) {
handle->card->type = SD_CARD_UNKNOWN;
EMMC_TRACE("CMD1 Timeout: Device is not ready\n");
return SD_CARD_UNKNOWN;
}
/* Save the ocr register */
handle->device->ctrl.ocr = ocr;
/* Ready State */
rc = sd_cmd2(handle);
if (rc != SD_OK) {
handle->card->type = SD_CARD_UNKNOWN;
return SD_CARD_UNKNOWN;
}
rc = sd_cmd3(handle);
if (rc != SD_OK) {
handle->card->type = SD_CARD_UNKNOWN;
return SD_CARD_UNKNOWN;
}
/* read CSD */
rc = sd_cmd9(handle, &emmc_global_vars_ptr->cardData);
if (rc != SD_OK) {
handle->card->type = SD_CARD_UNKNOWN;
return SD_CARD_UNKNOWN;
}
/* Increase clock frequency according to what the card advertises */
EMMC_TRACE("From CSD... cardData.csd.mmc.speed = 0x%X\n",
emmc_global_vars_ptr->cardData.csd.mmc.speed);
process_csd_mmc_speed(handle,
emmc_global_vars_ptr->cardData.csd.mmc.speed);
/* goto transfer mode */
rc = sd_cmd7(handle, handle->device->ctrl.rca);
if (rc != SD_OK) {
handle->card->type = SD_CARD_UNKNOWN;
return SD_CARD_UNKNOWN;
}
rc = mmc_cmd8(handle, emmc_global_buf_ptr->u.Ext_CSD_storage);
if (rc == SD_OK) {
/* calculate real capacity */
sec_count = emmc_global_buf_ptr->u.Ext_CSD_storage[212] |
emmc_global_buf_ptr->u.Ext_CSD_storage[213] << 8 |
emmc_global_buf_ptr->u.Ext_CSD_storage[214] << 16 |
emmc_global_buf_ptr->u.Ext_CSD_storage[215] << 24;
EMMC_TRACE("Device density = %ldMBytes\n",
handle->card->size / (1024 * 1024));
if (sec_count > 0) {
handle->card->size = (uint64_t)sec_count * 512;
EMMC_TRACE("Updated Device density = %ldMBytes\n",
handle->card->size / (1024 * 1024));
}
if (sec_count > (2u * 1024 * 1024 * 1024) / 512) {
handle->device->ctrl.ocr |= SD_CARD_HIGH_CAPACITY;
handle->device->cfg.blockSize = 512;
}
if (handle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY)
EMMC_TRACE("Sector addressing\n");
else
EMMC_TRACE("Byte addressing\n");
EMMC_TRACE("Ext_CSD_storage[162]: 0x%02X Ext_CSD_storage[179]: 0x%02X\n",
emmc_global_buf_ptr->u.Ext_CSD_storage[162],
emmc_global_buf_ptr->u.Ext_CSD_storage[179]);
}
return handle->card->type;
}
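/*
 * Example of the EXT_CSD SEC_COUNT handling above (numbers illustrative):
 * bytes 212..215 are assembled little-endian, so 0x00 0x00 0x80 0x00
 * gives sec_count = 0x00800000 = 8388608 sectors, i.e. a density of
 * 8388608 * 512 bytes = 4 GiB. Since that exceeds (2 GiB / 512) sectors,
 * the card is flagged high capacity and sector addressing is used.
 */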
/*
* The function sends the reset command to the card.
* The card will be in ready status after the reset.
*/
int reset_card(struct sd_handle *handle)
{
int res = SD_OK;
/* on reset, card's RCA should return to 0 */
handle->device->ctrl.rca = 0;
res = sd_cmd0(handle);
if (res != SD_OK)
return SD_RESET_ERROR;
return res;
}
/*
* The function sends a command to the card and starts
* data transmission.
*/
static int xfer_data(struct sd_handle *handle,
uint32_t mode,
uint32_t addr, uint32_t length, uint8_t *base)
{
int rc = SD_OK;
VERBOSE("XFER: dest: 0x%llx, addr: 0x%x, size: 0x%x bytes\n",
(uint64_t)base, addr, length);
if ((length / handle->device->cfg.blockSize) > 1) {
if (mode == SD_OP_READ) {
inv_dcache_range((uintptr_t)base, (uint64_t)length);
rc = sd_cmd18(handle, addr, length, base);
} else {
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
flush_dcache_range((uintptr_t)base, (uint64_t)length);
rc = sd_cmd25(handle, addr, length, base);
#else
rc = SD_DATA_XFER_ERROR;
#endif
}
} else {
if (mode == SD_OP_READ) {
inv_dcache_range((uintptr_t)base, (uint64_t)length);
rc = sd_cmd17(handle, addr,
handle->device->cfg.blockSize, base);
} else {
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
flush_dcache_range((uintptr_t)base, (uint64_t)length);
rc = sd_cmd24(handle, addr,
handle->device->cfg.blockSize, base);
#else
rc = SD_DATA_XFER_ERROR;
#endif
}
}
if (rc != SD_OK)
return SD_DATA_XFER_ERROR;
return SD_OK;
}
#ifdef INCLUDE_EMMC_DRIVER_ERASE_CODE
int erase_card(struct sd_handle *handle, uint32_t addr, uint32_t blocks)
{
uint32_t end_addr;
INFO("ERASE: addr: 0x%x, num of sectors: 0x%x\n", addr, blocks);
if (sd_cmd35(handle, addr) != SD_OK)
return SD_FAIL;
end_addr = addr + blocks - 1;
if (sd_cmd36(handle, end_addr) != SD_OK)
return SD_FAIL;
if (sd_cmd38(handle) != SD_OK)
return SD_FAIL;
return SD_OK;
}
#endif
/*
* The function reads block data from a card.
*/
#ifdef USE_EMMC_FIP_TOC_CACHE
int read_block(struct sd_handle *handle,
uint8_t *dst, uint32_t addr, uint32_t len)
{
int rel = SD_OK;
/*
* Avoid doing repeated reads of the partition block
* by caching.
*/
if (cached_partition_block &&
addr == PARTITION_BLOCK_ADDR &&
len == CACHE_SIZE) {
memcpy(dst, cached_block, len);
} else {
rel = xfer_data(handle, SD_OP_READ, addr, len, dst);
if (len == CACHE_SIZE && addr == PARTITION_BLOCK_ADDR) {
cached_partition_block = 1;
memcpy(cached_block, dst, len);
}
}
return rel;
}
#else
int read_block(struct sd_handle *handle,
uint8_t *dst, uint32_t addr, uint32_t len)
{
return xfer_data(handle, SD_OP_READ, addr, len, dst);
}
#endif
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
/*
* The function writes block data to a card.
*/
int write_block(struct sd_handle *handle,
uint8_t *src, uint32_t addr, uint32_t len)
{
int rel = SD_OK;
/*
* The current HC has a problem getting the response of cmd16 after cmd12;
* a delay would be necessary to ensure the next cmd16 does not time out.
* The delay would have to be at least 4 ms.
* The code removed cmd16 and uses cmd13 to get the card status before
* sending cmd18 or cmd25 to make sure the card is ready, and thus
* no delay is needed here.
*/
rel = xfer_data(handle, SD_OP_WRITE, addr, len, src);
EMMC_TRACE("wr_blk addr:0x%08X src:0x%08X len:0x%08X result:%d\n",
addr, src, len, rel);
return rel;
}
/*
* The function is called to write one block of data directly to
* a card's data buffer.
* It is used in non-DMA mode for card data transmission.
*/
int write_buffer(struct sd_handle *handle, uint32_t length, uint8_t *data)
{
uint32_t rem, blockSize, event;
uint8_t *pData = data;
blockSize = handle->device->cfg.blockSize;
rem = length;
if (rem == 0)
return SD_OK;
while (rem > 0) {
event = wait_for_event(handle,
SD4_EMMC_TOP_INTR_BWRDY_MASK |
SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus) {
check_error(handle, handle->device->ctrl.cmdStatus);
return SD_WRITE_ERROR;
}
if (rem >= blockSize)
chal_sd_write_buffer((CHAL_HANDLE *) handle->device,
blockSize, pData);
else
chal_sd_write_buffer((CHAL_HANDLE *) handle->device,
rem, pData);
if (rem > blockSize) {
rem -= blockSize;
pData += blockSize;
} else {
pData += rem;
rem = 0;
}
}
if ((event & SD4_EMMC_TOP_INTR_TXDONE_MASK) !=
SD4_EMMC_TOP_INTR_TXDONE_MASK) {
event = wait_for_event(handle,
SD4_EMMC_TOP_INTR_TXDONE_MASK |
SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus != SD_OK) {
check_error(handle, handle->device->ctrl.cmdStatus);
return SD_WRITE_ERROR;
}
} else {
handle->device->ctrl.eventList &= ~SD4_EMMC_TOP_INTR_TXDONE_MASK;
}
return SD_OK;
}
#endif /* INCLUDE_EMMC_DRIVER_WRITE_CODE */
/*
* The function is called to read at most one block of data
* directly from a card.
* It is used in non-DMA mode for card data transmission.
*/
int read_buffer(struct sd_handle *handle, uint32_t length, uint8_t *data)
{
uint32_t rem, blockSize, event = 0;
uint8_t *pData = data;
blockSize = handle->device->cfg.blockSize;
rem = length;
if (rem == 0)
return SD_OK;
while (rem > 0) {
event = wait_for_event(handle,
SD4_EMMC_TOP_INTR_BRRDY_MASK |
SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus) {
check_error(handle, handle->device->ctrl.cmdStatus);
return SD_READ_ERROR;
}
if (rem >= blockSize)
chal_sd_read_buffer((CHAL_HANDLE *) handle->device,
blockSize, pData);
else
chal_sd_read_buffer((CHAL_HANDLE *) handle->device, rem,
pData);
if (rem > blockSize) {
rem -= blockSize;
pData += blockSize;
} else {
pData += rem;
rem = 0;
}
}
/* In case, there are extra data in the SD FIFO, just dump them. */
chal_sd_dump_fifo((CHAL_HANDLE *) handle->device);
if ((event & SD4_EMMC_TOP_INTR_TXDONE_MASK) !=
SD4_EMMC_TOP_INTR_TXDONE_MASK) {
event = wait_for_event(handle, SD4_EMMC_TOP_INTR_TXDONE_MASK,
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus) {
check_error(handle, handle->device->ctrl.cmdStatus);
return SD_READ_ERROR;
}
} else {
handle->device->ctrl.eventList &= ~SD4_EMMC_TOP_INTR_TXDONE_MASK;
}
return SD_OK;
}
/*
* Error handling routine.
* The function just resets the DAT
* and CMD lines if an error occurs during data transmission.
*/
int check_error(struct sd_handle *handle, uint32_t ints)
{
uint32_t rel;
chal_sd_set_irq_signal((CHAL_HANDLE *) handle->device,
SD_ERR_INTERRUPTS, 0);
if (ints & SD4_EMMC_TOP_INTR_CMDERROR_MASK) {
chal_sd_reset_line((CHAL_HANDLE *) handle->device,
SD4_EMMC_TOP_CTRL1_CMDRST_MASK);
rel = abort_err(handle);
chal_sd_reset_line((CHAL_HANDLE *) handle->device,
SD4_EMMC_TOP_CTRL1_DATRST_MASK);
chal_sd_set_irq_signal((CHAL_HANDLE *) handle->device,
SD_ERR_INTERRUPTS, 1);
return (rel == SD_ERROR_NON_RECOVERABLE) ?
SD_ERROR_NON_RECOVERABLE : SD_ERROR_RECOVERABLE;
} else {
rel = err_recovery(handle, ints);
}
chal_sd_set_irq_signal((CHAL_HANDLE *) handle->device,
SD_ERR_INTERRUPTS, 1);
return rel;
}
/*
* Error recovery routine.
* Tries to recover from the error.
*/
static int err_recovery(struct sd_handle *handle, uint32_t errors)
{
uint32_t rel = 0;
/*
* In case of a timeout error, the cmd line and data line may
* still be active or stuck active, so either the data line or the
* cmd line needs to be reset to make sure a new cmd can be sent.
*/
if (errors & SD_CMD_ERROR_INT)
chal_sd_reset_line((CHAL_HANDLE *) handle->device,
SD4_EMMC_TOP_CTRL1_CMDRST_MASK);
if (errors & SD_DAT_ERROR_INT)
chal_sd_reset_line((CHAL_HANDLE *) handle->device,
SD4_EMMC_TOP_CTRL1_DATRST_MASK);
/* Abort transaction by sending out stop command */
if ((handle->device->ctrl.cmdIndex == 18) ||
(handle->device->ctrl.cmdIndex == 25))
rel = abort_err(handle);
return rel;
}
/*
* The function is called after an SD/MMC command completes to decode
* its response into the appropriate fields of struct sd_resp.
*/
int process_cmd_response(struct sd_handle *handle,
uint32_t cmdIndex,
uint32_t rsp0,
uint32_t rsp1,
uint32_t rsp2, uint32_t rsp3, struct sd_resp *resp)
{
int result = SD_OK;
/* R6 */
uint32_t rca = (rsp0 >> 16) & 0xffff;
uint32_t cardStatus = rsp0;
/* R4 */
uint32_t cBit = (rsp0 >> 31) & 0x1;
uint32_t funcs = (rsp0 >> 28) & 0x7;
uint32_t memPresent = (rsp0 >> 27) & 0x1;
resp->r1 = 0x3f;
resp->cardStatus = cardStatus;
if (cmdIndex == SD_CMD_IO_SEND_OP_COND) {
resp->data.r4.cardReady = cBit;
resp->data.r4.funcs = funcs;
resp->data.r4.memPresent = memPresent;
resp->data.r4.ocr = cardStatus;
}
if (cmdIndex == SD_CMD_MMC_SET_RCA) {
resp->data.r6.rca = rca;
resp->data.r6.cardStatus = cardStatus & 0xFFFF;
}
if (cmdIndex == SD_CMD_SELECT_DESELECT_CARD) {
resp->data.r7.rca = rca;
}
if (cmdIndex == SD_CMD_IO_RW_DIRECT) {
if (((rsp0 >> 16) & 0xffff) != 0)
result = SD_CMD_ERR_INVALID_RESPONSE;
resp->data.r5.data = rsp0 & 0xff;
}
if (cmdIndex == SD_CMD_IO_RW_EXTENDED) {
if (((rsp0 >> 16) & 0xffff) != 0)
result = SD_CMD_ERR_INVALID_RESPONSE;
resp->data.r5.data = rsp0 & 0xff;
}
if (cmdIndex == SD_ACMD_SD_SEND_OP_COND ||
cmdIndex == SD_CMD_SEND_OPCOND)
resp->data.r3.ocr = cardStatus;
if (cmdIndex == SD_CMD_SEND_CSD ||
cmdIndex == SD_CMD_SEND_CID ||
cmdIndex == SD_CMD_ALL_SEND_CID) {
resp->data.r2.rsp4 = rsp3;
resp->data.r2.rsp3 = rsp2;
resp->data.r2.rsp2 = rsp1;
resp->data.r2.rsp1 = rsp0;
}
if ((cmdIndex == SD_CMD_READ_EXT_CSD) &&
(handle->card->type == SD_CARD_SD)) {
if ((resp->cardStatus & 0xAA) != 0xAA) {
result = SD_CMD_ERR_INVALID_RESPONSE;
}
}
return result;
}
/*
 * Set up a data transfer: DMA buffer, data length, block size and the
 * number of blocks to be transferred. The actual programming of the
 * controller is done by chal_sd_setup_xfer().
 */
void data_xfer_setup(struct sd_handle *handle, uint8_t *data, uint32_t length,
int dir)
{
chal_sd_setup_xfer((CHAL_HANDLE *)handle->device, data, length, dir);
}
/*
 * The function performs a soft reset of the host SD controller. After
 * the call, all host controller registers are reset to their default
 * values.
 *
 * Note: this function only resets the host controller; it does not
 * reset the controller's software handle.
 */
int reset_host_ctrl(struct sd_handle *handle)
{
chal_sd_stop();
return SD_OK;
}
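/*
 * Dump the host controller PSTATE and ERRSTAT registers for debug.
 */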
static void pstate_log(struct sd_handle *handle)
{
ERROR("PSTATE: 0x%x\n", mmio_read_32
(handle->device->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_PSTATE_SD4_OFFSET));
ERROR("ERRSTAT: 0x%x\n", mmio_read_32
(handle->device->ctrl.sdRegBaseAddr +
SD4_EMMC_TOP_ERRSTAT_OFFSET));
}
/*
 * The function waits for one or a group of interrupts specified by
 * mask. It returns when any one of the interrupt status bits is set.
 * If interrupt mode is not enabled, it polls the interrupt status
 * register until an interrupt status bit is set or an error interrupt
 * happens. If interrupt mode is enabled, this function should be
 * called after the interrupt has been received by the ISR routine.
 */
uint32_t wait_for_event(struct sd_handle *handle,
uint32_t mask, uint32_t retry)
{
uint32_t regval, cmd12, time = 0;
handle->device->ctrl.cmdStatus = 0; /* no error */
EMMC_TRACE("%s %d mask:0x%x timeout:%d irq_status:0x%x\n",
__func__, __LINE__, mask, retry,
chal_sd_get_irq_status((CHAL_HANDLE *)handle->device));
/* Polling mode */
do {
regval = chal_sd_get_irq_status((CHAL_HANDLE *)handle->device);
if (regval & SD4_EMMC_TOP_INTR_DMAIRQ_MASK) {
chal_sd_set_dma_addr((CHAL_HANDLE *)handle->device,
(uintptr_t)
chal_sd_get_dma_addr((CHAL_HANDLE *)
handle->device));
chal_sd_clear_irq((CHAL_HANDLE *)handle->device,
SD4_EMMC_TOP_INTR_DMAIRQ_MASK);
}
if (time++ > retry) {
ERROR("EMMC: No response (cmd%d) after %dus.\n",
handle->device->ctrl.cmdIndex,
time * EMMC_WFE_RETRY_DELAY_US);
handle->device->ctrl.cmdStatus = SD_CMD_MISSING;
pstate_log(handle);
ERROR("EMMC: INT[0x%x]\n", regval);
break;
}
if (regval & SD4_EMMC_TOP_INTR_CTOERR_MASK) {
ERROR("EMMC: Cmd%d timeout INT[0x%x]\n",
handle->device->ctrl.cmdIndex, regval);
handle->device->ctrl.cmdStatus =
SD4_EMMC_TOP_INTR_CTOERR_MASK;
pstate_log(handle);
break;
}
if (regval & SD_CMD_ERROR_FLAGS) {
ERROR("EMMC: Cmd%d error INT[0x%x]\n",
handle->device->ctrl.cmdIndex, regval);
handle->device->ctrl.cmdStatus = SD_CMD_ERROR_FLAGS;
pstate_log(handle);
break;
}
cmd12 = chal_sd_get_atuo12_error((CHAL_HANDLE *)handle->device);
if (cmd12) {
ERROR("EMMC: Cmd%d auto cmd12 err:0x%x\n",
handle->device->ctrl.cmdIndex, cmd12);
handle->device->ctrl.cmdStatus = cmd12;
pstate_log(handle);
break;
}
if (SD_DATA_ERROR_FLAGS & regval) {
ERROR("EMMC: Data for cmd%d error, INT[0x%x]\n",
handle->device->ctrl.cmdIndex, regval);
handle->device->ctrl.cmdStatus =
(SD_DATA_ERROR_FLAGS & regval);
pstate_log(handle);
break;
}
if ((regval & mask) == 0)
udelay(EMMC_WFE_RETRY_DELAY_US);
} while ((regval & mask) == 0);
/* clear the interrupt since it is processed */
chal_sd_clear_irq((CHAL_HANDLE *)handle->device, (regval & mask));
return (regval & mask);
}
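/*
 * Configure the host controller: clock speed, command retry limit,
 * DMA mode and boundary, block size and wait-for-event retry count.
 */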
int32_t set_config(struct sd_handle *handle, uint32_t speed, uint32_t retry,
uint32_t dma, uint32_t dmaBound, uint32_t blkSize,
uint32_t wfe_retry)
{
int32_t rel = 0;
if (handle == NULL)
return SD_FAIL;
handle->device->cfg.wfe_retry = wfe_retry;
rel = chal_sd_config((CHAL_HANDLE *)handle->device, speed, retry,
dmaBound, blkSize, dma);
return rel;
}
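/*
 * Bring the eMMC device out of idle with CMD1 (SEND_OP_COND).
 * The host bus width is set back to 1 bit to match the device's
 * post-reset state.
 */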
int mmc_cmd1(struct sd_handle *handle)
{
uint32_t newOcr, res;
uint32_t cmd1_option = MMC_OCR_OP_VOLT | MMC_OCR_SECTOR_ACCESS_MODE;
/*
* After Reset, eMMC comes up in 1 Bit Data Width by default.
* Set host side to match.
*/
chal_sd_config_bus_width((CHAL_HANDLE *) handle->device,
SD_BUS_DATA_WIDTH_1BIT);
#ifdef USE_EMMC_FIP_TOC_CACHE
cached_partition_block = 0;
#endif
handle->device->ctrl.present = 0; /* init card present to be no card */
handle->card->type = SD_CARD_MMC;
res = sd_cmd1(handle, cmd1_option, &newOcr);
if (res != SD_OK) {
EMMC_TRACE("CMD1 Timeout: Device is not ready\n");
res = SD_CARD_UNKNOWN;
}
return res;
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdlib.h>
#include <stddef.h>
#include "bcm_emmc.h"
#include "emmc_chal_types.h"
#include "emmc_chal_sd.h"
#include "emmc_csl_sdprot.h"
#include "emmc_csl_sdcmd.h"
#include "emmc_csl_sd.h"
#include "emmc_chal_sd.h"
#include "emmc_pboot_hal_memory_drv.h"
int sd_cmd0(struct sd_handle *handle)
{
int res;
uint32_t argument = 0x0; /* Go to IDLE state. */
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_GO_IDLE_STATE, argument, 0, NULL);
if (res == SD_OK) {
/* Clear all other interrupts */
chal_sd_clear_irq((void *)handle->device, 0xffffffff);
}
return res;
}
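/*
 * CMD1 (SEND_OP_COND): negotiate the operating conditions and return
 * the card's OCR in *ocr_output.
 */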
int sd_cmd1(struct sd_handle *handle, uint32_t ocr, uint32_t *ocr_output)
{
int res;
uint32_t options;
struct sd_resp resp;
options = SD_CMDR_RSP_TYPE_R3_4 << SD_CMDR_RSP_TYPE_S;
if (ocr_output == NULL) {
EMMC_TRACE("Invalid args\n");
return SD_FAIL;
}
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_SEND_OPCOND, ocr, options, &resp);
if (res == SD_OK)
*ocr_output = resp.data.r3.ocr;
return res;
}
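/*
 * CMD2 (ALL_SEND_CID): request the card identification register.
 */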
int sd_cmd2(struct sd_handle *handle)
{
uint32_t options;
struct sd_resp resp;
/* send cmd and parse result */
options = SD_CMDR_RSP_TYPE_R2 << SD_CMDR_RSP_TYPE_S;
return send_cmd(handle, SD_CMD_ALL_SEND_CID, 0, options, &resp);
}
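/*
 * CMD3 (SET_RELATIVE_ADDR): assign a relative card address (RCA)
 * to the device.
 */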
int sd_cmd3(struct sd_handle *handle)
{
int res;
uint32_t options = 0;
uint32_t argument;
struct sd_resp resp;
/* use non zero and non 0x1 value for rca */
handle->device->ctrl.rca = 0x5;
argument = handle->device->ctrl.rca << SD_CMD7_ARG_RCA_SHIFT;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK |
SD4_EMMC_TOP_CMD_CRC_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_MMC_SET_RCA, argument, options, &resp);
if (res != SD_OK)
handle->device->ctrl.rca = 0;
return res;
}
int sd_cmd7(struct sd_handle *handle, uint32_t rca)
{
int res;
uint32_t argument, options;
struct sd_resp resp;
argument = (rca << SD_CMD7_ARG_RCA_SHIFT);
/*
* Response to CMD7 is:
 * R1 while selecting from Stand-By State to Transfer State
* R1b while selecting from Disconnected State to Programming State.
*
* In this driver, we only issue a CMD7 once, to go to transfer mode
* during init_mmc_card().
*/
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK |
SD4_EMMC_TOP_CMD_CRC_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_SELECT_DESELECT_CARD, argument, options,
&resp);
if (res == SD_OK)
/* Clear all other interrupts */
chal_sd_clear_irq((void *)handle->device, 0xffffffff);
return res;
}
/*
* CMD8 Get CSD_EXT
*/
int mmc_cmd8(struct sd_handle *handle, uint8_t *extCsdReg)
{
uint32_t res, options;
struct sd_resp resp;
data_xfer_setup(handle, extCsdReg, CEATA_EXT_CSDBLOCK_SIZE,
SD_XFER_CARD_TO_HOST);
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_DPS_MASK | SD4_EMMC_TOP_CMD_DTDS_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK | SD4_EMMC_TOP_CMD_CRC_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_READ_EXT_CSD, 0, options, &resp);
if (res == SD_OK)
res = process_data_xfer(handle, extCsdReg, 0,
CEATA_EXT_CSDBLOCK_SIZE,
SD_XFER_CARD_TO_HOST);
return res;
}
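/*
 * CMD9 (SEND_CSD): read the CSD register and derive the card size
 * and the maximum read/write block lengths.
 */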
int sd_cmd9(struct sd_handle *handle, struct sd_card_data *card)
{
int res;
uint32_t argument, options, iBlkNum, multiFactor = 1;
uint32_t maxReadBlockLen = 1, maxWriteBlockLen = 1;
struct sd_resp resp;
argument = handle->device->ctrl.rca << SD_CMD7_ARG_RCA_SHIFT;
options = SD_CMDR_RSP_TYPE_R2 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CRC_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_SEND_CSD, argument, options, &resp);
if (res != SD_OK)
return res;
if (handle->card->type == SD_CARD_MMC) {
card->csd.mmc.structure = (resp.data.r2.rsp4 >> 22) & 0x3;
card->csd.mmc.csdSpecVer = (resp.data.r2.rsp4 >> 18) & 0x0f;
card->csd.mmc.taac = (resp.data.r2.rsp4 >> 8) & 0xff;
card->csd.mmc.nsac = resp.data.r2.rsp4 & 0xff;
card->csd.mmc.speed = resp.data.r2.rsp3 >> 24;
card->csd.mmc.classes = (resp.data.r2.rsp3 >> 12) & 0xfff;
card->csd.mmc.rdBlkLen = (resp.data.r2.rsp3 >> 8) & 0xf;
card->csd.mmc.rdBlkPartial = (resp.data.r2.rsp3 >> 7) & 0x01;
card->csd.mmc.wrBlkMisalign = (resp.data.r2.rsp3 >> 6) & 0x1;
card->csd.mmc.rdBlkMisalign = (resp.data.r2.rsp3 >> 5) & 0x1;
card->csd.mmc.dsr = (resp.data.r2.rsp2 >> 4) & 0x01;
card->csd.mmc.size =
((resp.data.r2.rsp3 & 0x3) << 10) +
((resp.data.r2.rsp2 >> 22) & 0x3ff);
card->csd.mmc.vddRdCurrMin = (resp.data.r2.rsp2 >> 19) & 0x7;
card->csd.mmc.vddRdCurrMax = (resp.data.r2.rsp2 >> 16) & 0x7;
card->csd.mmc.vddWrCurrMin = (resp.data.r2.rsp2 >> 13) & 0x7;
card->csd.mmc.vddWrCurrMax = (resp.data.r2.rsp2 >> 10) & 0x7;
card->csd.mmc.devSizeMulti = (resp.data.r2.rsp2 >> 7) & 0x7;
card->csd.mmc.eraseGrpSize = (resp.data.r2.rsp2 >> 2) & 0x1f;
card->csd.mmc.eraseGrpSizeMulti =
((resp.data.r2.rsp2 & 0x3) << 3) +
((resp.data.r2.rsp1 >> 29) & 0x7);
card->csd.mmc.wrProtGroupSize =
((resp.data.r2.rsp1 >> 24) & 0x1f);
card->csd.mmc.wrProtGroupEnable =
(resp.data.r2.rsp1 >> 23) & 0x1;
card->csd.mmc.manuDefEcc = (resp.data.r2.rsp1 >> 21) & 0x3;
card->csd.mmc.wrSpeedFactor = (resp.data.r2.rsp1 >> 18) & 0x7;
card->csd.mmc.wrBlkLen = (resp.data.r2.rsp1 >> 14) & 0xf;
card->csd.mmc.wrBlkPartial = (resp.data.r2.rsp1 >> 13) & 0x1;
card->csd.mmc.protAppl = (resp.data.r2.rsp1 >> 8) & 0x1;
card->csd.mmc.copyFlag = (resp.data.r2.rsp1 >> 7) & 0x1;
card->csd.mmc.permWrProt = (resp.data.r2.rsp1 >> 6) & 0x1;
card->csd.mmc.tmpWrProt = (resp.data.r2.rsp1 >> 5) & 0x1;
card->csd.mmc.fileFormat = (resp.data.r2.rsp1 >> 4) & 0x03;
card->csd.mmc.eccCode = resp.data.r2.rsp1 & 0x03;
maxReadBlockLen <<= card->csd.mmc.rdBlkLen;
maxWriteBlockLen <<= card->csd.mmc.wrBlkLen;
iBlkNum = card->csd.mmc.size + 1;
multiFactor = (1 << (card->csd.mmc.devSizeMulti + 2));
handle->card->size =
iBlkNum * multiFactor * (1 << card->csd.mmc.rdBlkLen);
}
handle->card->maxRdBlkLen = maxReadBlockLen;
handle->card->maxWtBlkLen = maxWriteBlockLen;
if (handle->card->size < 0xA00000) {
/*
 * A size below 10MB means the CMD9 response is wrong;
 * use the default value of 1GB.
 */
handle->card->size = 0x40000000;
handle->card->maxRdBlkLen = 512;
handle->card->maxWtBlkLen = 512;
}
if ((handle->card->maxRdBlkLen > 512) ||
(handle->card->maxWtBlkLen > 512)) {
handle->card->maxRdBlkLen = 512;
handle->card->maxWtBlkLen = 512;
} else if ((handle->card->maxRdBlkLen == 0) ||
(handle->card->maxWtBlkLen == 0)) {
handle->card->maxRdBlkLen = 512;
handle->card->maxWtBlkLen = 512;
}
handle->device->cfg.blockSize = handle->card->maxRdBlkLen;
return res;
}
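/*
 * CMD13 (SEND_STATUS): read the card status register.
 */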
int sd_cmd13(struct sd_handle *handle, uint32_t *status)
{
int res;
uint32_t argument, options;
struct sd_resp resp;
argument = handle->device->ctrl.rca << SD_CMD7_ARG_RCA_SHIFT;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK |
SD4_EMMC_TOP_CMD_CRC_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_SEND_STATUS, argument, options, &resp);
if (res == SD_OK) {
*status = resp.cardStatus;
}
return res;
}
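/*
 * CMD16 (SET_BLOCKLEN): set the block length. CMD13 is polled first
 * until the card reports READY_FOR_DATA.
 */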
int sd_cmd16(struct sd_handle *handle, uint32_t length)
{
int res;
uint32_t argument, options, ntry;
struct sd_resp resp;
argument = length;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CRC_EN_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
ntry = 0;
do {
res = sd_cmd13(handle, &resp.cardStatus);
if (res != SD_OK) {
EMMC_TRACE(
"cmd13 failed before cmd16: rca 0x%0x, return %d, response 0x%0x\n",
handle->device->ctrl.rca, res, resp.cardStatus);
return res;
}
if (resp.cardStatus & 0x100)
break;
EMMC_TRACE("cmd13 rsp:0x%08x before cmd16\n", resp.cardStatus);
if (ntry > handle->device->cfg.retryLimit) {
EMMC_TRACE("cmd13 retry reach limit %d\n",
handle->device->cfg.retryLimit);
return SD_CMD_TIMEOUT;
}
ntry++;
EMMC_TRACE("cmd13 retry %d\n", ntry);
SD_US_DELAY(1000);
} while (1);
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_SET_BLOCKLEN, argument, options, &resp);
return res;
}
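/*
 * CMD17 (READ_SINGLE_BLOCK): read a single block from the card.
 * CMD13 is polled first until the card reports READY_FOR_DATA.
 */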
int sd_cmd17(struct sd_handle *handle,
uint32_t addr, uint32_t len, uint8_t *buffer)
{
int res;
uint32_t argument, options, ntry;
struct sd_resp resp;
ntry = 0;
do {
res = sd_cmd13(handle, &resp.cardStatus);
if (res != SD_OK) {
EMMC_TRACE(
"cmd 13 failed before cmd17: rca 0x%0x, return %d, response 0x%0x\n",
handle->device->ctrl.rca, res, resp.cardStatus);
return res;
}
if (resp.cardStatus & 0x100)
break;
EMMC_TRACE("cmd13 rsp:0x%08x before cmd17\n", resp.cardStatus);
if (ntry > handle->device->cfg.retryLimit) {
EMMC_TRACE("cmd13 retry reach limit %d\n",
handle->device->cfg.retryLimit);
return SD_CMD_TIMEOUT;
}
ntry++;
EMMC_TRACE("cmd13 retry %d\n", ntry);
SD_US_DELAY(1000);
} while (1);
data_xfer_setup(handle, buffer, len, SD_XFER_CARD_TO_HOST);
/* send cmd and parse result */
argument = addr;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_DPS_MASK | SD4_EMMC_TOP_CMD_DTDS_MASK |
SD4_EMMC_TOP_CMD_CRC_EN_MASK | SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
res = send_cmd(handle, SD_CMD_READ_SINGLE_BLOCK, argument, options,
&resp);
if (res != SD_OK)
return res;
res = process_data_xfer(handle, buffer, addr, len, SD_XFER_CARD_TO_HOST);
return res;
}
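/*
 * CMD18 (READ_MULTIPLE_BLOCK): read multiple blocks from the card,
 * with the controller's auto-command (CMD12) feature enabled so the
 * transfer is stopped automatically.
 */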
int sd_cmd18(struct sd_handle *handle,
uint32_t addr, uint32_t len, uint8_t *buffer)
{
int res;
uint32_t argument, options, ntry;
struct sd_resp resp;
ntry = 0;
do {
res = sd_cmd13(handle, &resp.cardStatus);
if (res != SD_OK) {
EMMC_TRACE(
"cmd 13 failed before cmd18: rca 0x%0x, return %d, response 0x%0x\n",
handle->device->ctrl.rca, res, resp.cardStatus);
return res;
}
if (resp.cardStatus & 0x100)
break;
EMMC_TRACE("cmd13 rsp:0x%08x before cmd18\n", resp.cardStatus);
if (ntry > handle->device->cfg.retryLimit) {
EMMC_TRACE("cmd13 retry reach limit %d\n",
handle->device->cfg.retryLimit);
return SD_CMD_TIMEOUT;
}
ntry++;
EMMC_TRACE("cmd13 retry %d\n", ntry);
SD_US_DELAY(1000);
} while (1);
data_xfer_setup(handle, buffer, len, SD_XFER_CARD_TO_HOST);
argument = addr;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_DPS_MASK | SD4_EMMC_TOP_CMD_DTDS_MASK |
SD4_EMMC_TOP_CMD_MSBS_MASK | SD4_EMMC_TOP_CMD_CCHK_EN_MASK |
SD4_EMMC_TOP_CMD_BCEN_MASK | SD4_EMMC_TOP_CMD_CRC_EN_MASK |
BIT(SD4_EMMC_TOP_CMD_ACMDEN_SHIFT);
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_READ_MULTIPLE_BLOCK, argument, options,
&resp);
if (res != SD_OK)
return res;
res = process_data_xfer(handle, buffer, addr, len, SD_XFER_CARD_TO_HOST);
return res;
}
#ifdef INCLUDE_EMMC_DRIVER_ERASE_CODE
static int card_sts_resp(struct sd_handle *handle, uint32_t *status)
{
int res;
uint32_t ntry = 0;
do {
res = sd_cmd13(handle, status);
if (res != SD_OK) {
EMMC_TRACE(
"cmd 13 failed before cmd35: rca 0x%0x, return %d\n",
handle->device->ctrl.rca, res);
return res;
}
if (*status & 0x100)
break;
EMMC_TRACE("cmd13 rsp:0x%08x before cmd35\n", *status);
if (ntry > handle->device->cfg.retryLimit) {
EMMC_TRACE("cmd13 retry reach limit %d\n",
handle->device->cfg.retryLimit);
return SD_CMD_TIMEOUT;
}
ntry++;
EMMC_TRACE("cmd13 retry %d\n", ntry);
SD_US_DELAY(1000);
} while (1);
return SD_OK;
}
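/*
 * CMD35 (ERASE_GROUP_START): set the address of the first erase group.
 */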
int sd_cmd35(struct sd_handle *handle, uint32_t start)
{
int res;
uint32_t argument, options;
struct sd_resp resp;
res = card_sts_resp(handle, &resp.cardStatus);
if (res != SD_OK)
return res;
argument = start;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CRC_EN_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_ERASE_GROUP_START,
argument, options, &resp);
if (res != SD_OK)
return res;
return res;
}
int sd_cmd36(struct sd_handle *handle, uint32_t end)
{
int res;
uint32_t argument, options;
struct sd_resp resp;
res = card_sts_resp(handle, &resp.cardStatus);
if (res != SD_OK)
return res;
argument = end;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CRC_EN_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_ERASE_GROUP_END,
argument, options, &resp);
if (res != SD_OK)
return res;
return res;
}
int sd_cmd38(struct sd_handle *handle)
{
int res;
uint32_t argument, options;
struct sd_resp resp;
res = card_sts_resp(handle, &resp.cardStatus);
if (res != SD_OK)
return res;
argument = 0;
options = (SD_CMDR_RSP_TYPE_R1b_5b << SD_CMDR_RSP_TYPE_S) |
SD4_EMMC_TOP_CMD_CRC_EN_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_ERASE, argument, options, &resp);
if (res != SD_OK)
return res;
return res;
}
#endif
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
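/*
 * CMD24 (WRITE_BLOCK): write a single block to the card.
 * CMD13 is polled first until the card reports READY_FOR_DATA.
 */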
int sd_cmd24(struct sd_handle *handle,
uint32_t addr, uint32_t len, uint8_t *buffer)
{
int res;
uint32_t argument, options, ntry;
struct sd_resp resp;
ntry = 0;
do {
res = sd_cmd13(handle, &resp.cardStatus);
if (res != SD_OK) {
EMMC_TRACE(
"cmd 13 failed before cmd24: rca 0x%0x, return %d, response 0x%0x\n",
handle->device->ctrl.rca, res, resp.cardStatus);
return res;
}
if (resp.cardStatus & 0x100)
break;
EMMC_TRACE("cmd13 rsp:0x%08x before cmd24\n", resp.cardStatus);
if (ntry > handle->device->cfg.retryLimit) {
EMMC_TRACE("cmd13 retry reach limit %d\n",
handle->device->cfg.retryLimit);
return SD_CMD_TIMEOUT;
}
ntry++;
EMMC_TRACE("cmd13 retry %d\n", ntry);
SD_US_DELAY(1000);
} while (1);
data_xfer_setup(handle, buffer, len, SD_XFER_HOST_TO_CARD);
argument = addr;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_DPS_MASK | SD4_EMMC_TOP_CMD_CRC_EN_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK;
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_WRITE_BLOCK, argument, options, &resp);
if (res != SD_OK)
return res;
res = process_data_xfer(handle, buffer, addr, len, SD_XFER_HOST_TO_CARD);
return res;
}
int sd_cmd25(struct sd_handle *handle,
uint32_t addr, uint32_t len, uint8_t *buffer)
{
int res = SD_OK;
uint32_t argument, options, ntry;
struct sd_resp resp;
ntry = 0;
do {
res = sd_cmd13(handle, &resp.cardStatus);
if (res != SD_OK) {
EMMC_TRACE(
"cmd 13 failed before cmd25: rca 0x%0x, return %d, response 0x%0x\n",
handle->device->ctrl.rca, res, resp.cardStatus);
return res;
}
if (resp.cardStatus & 0x100)
break;
EMMC_TRACE("cmd13 rsp:0x%08x before cmd25\n", resp.cardStatus);
if (ntry > handle->device->cfg.retryLimit) {
EMMC_TRACE("cmd13 retry reach limit %d\n",
handle->device->cfg.retryLimit);
return SD_CMD_TIMEOUT;
}
ntry++;
EMMC_TRACE("cmd13 retry %d\n", ntry);
SD_US_DELAY(1000);
} while (1);
data_xfer_setup(handle, buffer, len, SD_XFER_HOST_TO_CARD);
argument = addr;
options = SD_CMDR_RSP_TYPE_R1_5_6 << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_DPS_MASK | SD4_EMMC_TOP_CMD_MSBS_MASK |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK | SD4_EMMC_TOP_CMD_BCEN_MASK |
SD4_EMMC_TOP_CMD_CRC_EN_MASK |
BIT(SD4_EMMC_TOP_CMD_ACMDEN_SHIFT);
/* send cmd and parse result */
res = send_cmd(handle, SD_CMD_WRITE_MULTIPLE_BLOCK,
argument, options, &resp);
if (res != SD_OK)
return res;
res = process_data_xfer(handle, buffer, addr, len, SD_XFER_HOST_TO_CARD);
return res;
}
#endif /* INCLUDE_EMMC_DRIVER_WRITE_CODE */
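/*
 * MMC CMD6 (SWITCH): modify an EXT_CSD field. After the command, the
 * busy phase is waited out and the SWITCH_ERROR bit is checked via
 * CMD13.
 */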
int mmc_cmd6(struct sd_handle *handle, uint32_t argument)
{
int res;
uint32_t options;
struct sd_resp resp;
options = SD_CMDR_RSP_TYPE_R1b_5b << SD_CMDR_RSP_TYPE_S |
SD4_EMMC_TOP_CMD_CCHK_EN_MASK | SD4_EMMC_TOP_CMD_CRC_EN_MASK;
EMMC_TRACE("Sending CMD6 with argument 0x%X\n", argument);
/* send cmd and parse result */
res = send_cmd(handle, SD_ACMD_SET_BUS_WIDTH, argument, options, &resp);
/*
* For R1b type response:
* controller issues a COMMAND COMPLETE interrupt when the R1
* response is received,
* then controller monitors DAT0 for busy status,
* controller issues a TRANSFER COMPLETE interrupt when busy signal
* clears.
*/
wait_for_event(handle,
SD4_EMMC_TOP_INTR_TXDONE_MASK | SD_ERR_INTERRUPTS,
handle->device->cfg.wfe_retry);
if (res == SD_OK) {
/* Check the result of CMD6 by reading the card status with CMD13 */
res = sd_cmd13(handle, &resp.cardStatus);
if (res == SD_OK) {
/* Check bit 7 (SWITCH_ERROR) in card status */
if ((resp.cardStatus & 0x80) != 0) {
EMMC_TRACE("cmd6 failed: SWITCH_ERROR\n");
res = SD_FAIL;
}
} else {
EMMC_TRACE("cmd13 failed after cmd6: ");
EMMC_TRACE("rca 0x%0x, return %d, response 0x%0x\n",
handle->device->ctrl.rca, res, resp.cardStatus);
}
}
return res;
}
#define SD_BUSY_CHECK 0x00203000
#define DAT0_LEVEL_MASK 0x100000 /* bit20 in PSTATE */
#define DEV_BUSY_TIMEOUT 600000 /* 60 Sec : 600000 * 100us */
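/*
 * Send a command to the card:
 * - wait for the CMD (and, for busy commands, DAT) line to be free,
 * - issue the command and wait for completion or an error interrupt,
 * - retry on missing responses and recoverable errors up to the
 *   configured retry limit,
 * - for commands with a busy phase, poll DAT0 until the device is no
 *   longer busy.
 */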
int send_cmd(struct sd_handle *handle, uint32_t cmdIndex, uint32_t argument,
uint32_t options, struct sd_resp *resp)
{
int status = SD_OK;
uint32_t event = 0, present, timeout = 0, retry = 0, mask = 3;
uint32_t temp_resp[4];
if (handle == NULL) {
EMMC_TRACE("Invalid handle for cmd%d\n", cmdIndex);
return SD_INVALID_HANDLE;
}
mask = (SD_BUSY_CHECK & options) ? 3 : 1;
RETRY_WRITE_CMD:
do {
/* Make sure it is ok to send command */
present =
chal_sd_get_present_status((CHAL_HANDLE *) handle->device);
timeout++;
if (present & mask)
SD_US_DELAY(1000);
else
break;
} while (timeout < EMMC_BUSY_CMD_TIMEOUT_MS);
if (timeout >= EMMC_BUSY_CMD_TIMEOUT_MS) {
status = SD_CMD_MISSING;
EMMC_TRACE("cmd%d timedout %dms\n", cmdIndex, timeout);
}
/* Reset both DAT and CMD lines if either of them is stuck */
if (present & mask)
check_error(handle, SD4_EMMC_TOP_INTR_CMDERROR_MASK);
handle->device->ctrl.argReg = argument;
chal_sd_send_cmd((CHAL_HANDLE *) handle->device, cmdIndex,
handle->device->ctrl.argReg, options);
handle->device->ctrl.cmdIndex = cmdIndex;
event = wait_for_event(handle,
(SD4_EMMC_TOP_INTR_CMDDONE_MASK |
SD_ERR_INTERRUPTS),
handle->device->cfg.wfe_retry);
if (handle->device->ctrl.cmdStatus == SD_CMD_MISSING) {
retry++;
if (retry >= handle->device->cfg.retryLimit) {
status = SD_CMD_MISSING;
EMMC_TRACE("cmd%d retry reaches the limit %d\n",
cmdIndex, retry);
} else {
/* reset both DAT & CMD line if one of them is stuck */
present = chal_sd_get_present_status((CHAL_HANDLE *)
handle->device);
if (present & mask)
check_error(handle,
SD4_EMMC_TOP_INTR_CMDERROR_MASK);
EMMC_TRACE("cmd%d retry %d PSTATE[0x%08x]\n",
cmdIndex, retry,
chal_sd_get_present_status((CHAL_HANDLE *)
handle->device));
goto RETRY_WRITE_CMD;
}
}
if (handle->device->ctrl.cmdStatus == SD_OK) {
if (resp != NULL) {
status =
chal_sd_get_response((CHAL_HANDLE *) handle->device,
temp_resp);
process_cmd_response(handle,
handle->device->ctrl.cmdIndex,
temp_resp[0], temp_resp[1],
temp_resp[2], temp_resp[3], resp);
}
/* Check Device busy after CMD */
if ((cmdIndex == 5) || (cmdIndex == 6) || (cmdIndex == 7) ||
(cmdIndex == 28) || (cmdIndex == 29) || (cmdIndex == 38)) {
timeout = 0;
do {
present =
chal_sd_get_present_status((CHAL_HANDLE *)
handle->device);
timeout++;
/* Dat[0]:bit20 low means device busy */
if ((present & DAT0_LEVEL_MASK) == 0) {
EMMC_TRACE("Device busy: ");
EMMC_TRACE(
"cmd%d arg:0x%08x: PSTATE[0x%08x]\n",
cmdIndex, argument, present);
SD_US_DELAY(100);
} else {
break;
}
} while (timeout < DEV_BUSY_TIMEOUT);
}
} else if (handle->device->ctrl.cmdStatus &&
handle->device->ctrl.cmdStatus != SD_CMD_MISSING) {
retry++;
status = check_error(handle, handle->device->ctrl.cmdStatus);
EMMC_TRACE(
"cmd%d error: cmdStatus:0x%08x error_status:0x%08x\n",
cmdIndex, handle->device->ctrl.cmdStatus, status);
if ((handle->device->ctrl.cmdIndex == 1) ||
(handle->device->ctrl.cmdIndex == 5)) {
status = event;
} else if ((handle->device->ctrl.cmdIndex == 7) ||
(handle->device->ctrl.cmdIndex == 41)) {
status = event;
} else if ((status == SD_ERROR_RECOVERABLE) &&
(retry < handle->device->cfg.retryLimit)) {
EMMC_TRACE("cmd%d recoverable error ", cmdIndex);
EMMC_TRACE("retry %d PSTATE[0x%08x].\n", retry,
chal_sd_get_present_status((CHAL_HANDLE *)
handle->device));
goto RETRY_WRITE_CMD;
} else {
EMMC_TRACE("cmd%d retry reaches the limit %d\n",
cmdIndex, retry);
status = event;
}
}
handle->device->ctrl.blkReg = 0;
/* clear error status for next command */
handle->device->ctrl.cmdStatus = 0;
return status;
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <emmc_api.h>
#include <cmn_plat_util.h>
#define MAX_CMD_RETRY 10
#if EMMC_USE_DMA
#define USE_DMA 1
#else
#define USE_DMA 0
#endif
struct emmc_global_buffer emmc_global_buf;
struct emmc_global_buffer *emmc_global_buf_ptr = &emmc_global_buf;
struct emmc_global_vars emmc_global_vars;
struct emmc_global_vars *emmc_global_vars_ptr = &emmc_global_vars;
static struct sd_handle *sdio_gethandle(void);
static uint32_t sdio_idle(struct sd_handle *p_sdhandle);
static uint32_t sdio_read(struct sd_handle *p_sdhandle,
uintptr_t mem_addr,
uintptr_t storage_addr,
size_t storage_size,
size_t bytes_to_read);
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
static uint32_t sdio_write(struct sd_handle *p_sdhandle,
uintptr_t mem_addr,
uintptr_t data_addr,
size_t bytes_to_write);
#endif
static struct sd_handle *sdio_init(void);
static int32_t bcm_emmc_card_ready_state(struct sd_handle *p_sdhandle);
static void init_globals(void)
{
memset((void *)emmc_global_buf_ptr, 0, sizeof(*emmc_global_buf_ptr));
memset((void *)emmc_global_vars_ptr, 0, sizeof(*emmc_global_vars_ptr));
}
/*
* This function is used to change partition
*/
uint32_t emmc_partition_select(uint32_t partition)
{
int rc;
struct sd_handle *sd_handle = sdio_gethandle();
if (sd_handle->device == 0) {
EMMC_TRACE("eMMC init is not done");
return 0;
}
switch (partition) {
case EMMC_BOOT_PARTITION1:
rc = set_boot_config(sd_handle,
SDIO_HW_EMMC_EXT_CSD_BOOT_ACC_BOOT1);
EMMC_TRACE(
"Change to Boot Partition 1 result:%d (0 means SD_OK)\n",
rc);
break;
case EMMC_BOOT_PARTITION2:
rc = set_boot_config(sd_handle,
SDIO_HW_EMMC_EXT_CSD_BOOT_ACC_BOOT2);
EMMC_TRACE(
"Change to Boot Partition 2 result:%d (0 means SD_OK)\n",
rc);
break;
case EMMC_USE_CURRENT_PARTITION:
rc = SD_OK;
EMMC_TRACE("Stay on current partition");
break;
case EMMC_USER_AREA:
default:
rc = set_boot_config(sd_handle,
SDIO_HW_EMMC_EXT_CSD_BOOT_ACC_USER);
EMMC_TRACE("Change to User area result:%d (0 means SD_OK)\n",
rc);
break;
}
return (rc == SD_OK);
}
/*
* Initialize emmc controller for eMMC
* Returns 0 on fail condition
*/
uint32_t bcm_emmc_init(bool card_rdy_only)
{
struct sd_handle *p_sdhandle;
uint32_t result = 0;
EMMC_TRACE("Enter emmc_controller_init()\n");
/* If eMMC is already initialized, skip init */
if (emmc_global_vars_ptr->init_done)
return 1;
init_globals();
p_sdhandle = sdio_init();
if (p_sdhandle == NULL) {
ERROR("eMMC init failed");
return result;
}
if (card_rdy_only) {
/* Put the card in Ready state, Not complete init */
result = bcm_emmc_card_ready_state(p_sdhandle);
return !result;
}
if (sdio_idle(p_sdhandle) == EMMC_BOOT_OK) {
set_config(p_sdhandle, SD_NORMAL_SPEED, MAX_CMD_RETRY, USE_DMA,
SD_DMA_BOUNDARY_256K, EMMC_BLOCK_SIZE,
EMMC_WFE_RETRY);
if (!select_blk_sz(p_sdhandle,
p_sdhandle->device->cfg.blockSize)) {
emmc_global_vars_ptr->init_done = 1;
result = 1;
} else {
ERROR("Select Block Size failed\n");
}
} else {
ERROR("eMMC init failed");
}
/* Initialization failed, so de-initialize the HW settings */
if (result == 0)
emmc_deinit();
return result;
}
/*
* Function to de-init SDIO controller for eMMC
*/
void emmc_deinit(void)
{
emmc_global_vars_ptr->init_done = 0;
emmc_global_vars_ptr->sdHandle.card = 0;
emmc_global_vars_ptr->sdHandle.device = 0;
}
/*
* Read eMMC memory
* Returns read_size
*/
uint32_t emmc_read(uintptr_t mem_addr, uintptr_t storage_addr,
size_t storage_size, size_t bytes_to_read)
{
struct sd_handle *sd_handle = sdio_gethandle();
if (sd_handle->device == 0) {
EMMC_TRACE("eMMC init is not done");
return 0;
}
return sdio_read(sdio_gethandle(), mem_addr, storage_addr,
storage_size, bytes_to_read);
}
#ifdef INCLUDE_EMMC_DRIVER_ERASE_CODE
#define EXT_CSD_ERASE_GRP_SIZE 224
static int emmc_block_erase(uintptr_t mem_addr, size_t blocks)
{
struct sd_handle *sd_handle = sdio_gethandle();
if (sd_handle->device == 0) {
ERROR("eMMC init is not done");
return -1;
}
return erase_card(sdio_gethandle(), mem_addr, blocks);
}
int emmc_erase(uintptr_t mem_addr, size_t num_of_blocks, uint32_t partition)
{
int err = 0;
size_t block_count = 0, blocks = 0;
size_t erase_group = 0;
erase_group =
emmc_global_buf_ptr->u.Ext_CSD_storage[EXT_CSD_ERASE_GRP_SIZE]*1024;
INFO("eMMC Erase Group Size=0x%lx\n", erase_group);
emmc_partition_select(partition);
while (block_count < num_of_blocks) {
blocks = ((num_of_blocks - block_count) > erase_group) ?
erase_group : (num_of_blocks - block_count);
err = emmc_block_erase(mem_addr + block_count, blocks);
if (err)
break;
block_count += blocks;
}
if (err == 0)
INFO("eMMC Erase of partition %d successful\n", partition);
else
ERROR("eMMC Erase of partition %d Failed(%i)\n", partition, err);
return err;
}
#endif
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
/*
* Write to eMMC memory
* Returns written_size
*/
uint32_t emmc_write(uintptr_t mem_addr, uintptr_t data_addr,
size_t bytes_to_write)
{
struct sd_handle *sd_handle = sdio_gethandle();
if (sd_handle->device == 0) {
EMMC_TRACE("eMMC init is not done");
return 0;
}
return sdio_write(sd_handle, mem_addr, data_addr, bytes_to_write);
}
#endif
/*
* Send SDIO Cmd
* Return 0 for pass condition
*/
uint32_t send_sdio_cmd(uint32_t cmdIndex, uint32_t argument,
uint32_t options, struct sd_resp *resp)
{
struct sd_handle *sd_handle = sdio_gethandle();
if (sd_handle->device == 0) {
EMMC_TRACE("eMMC init is not done");
return 1;
}
return send_cmd(sd_handle, cmdIndex, argument, options, resp);
}
/*
 * This function returns the SDIO handle
*/
struct sd_handle *sdio_gethandle(void)
{
return &emmc_global_vars_ptr->sdHandle;
}
/*
* Initialize SDIO controller
*/
struct sd_handle *sdio_init(void)
{
uint32_t SDIO_base;
struct sd_handle *p_sdhandle = &emmc_global_vars_ptr->sdHandle;
SDIO_base = EMMC_CTRL_REGS_BASE_ADDR;
if (SDIO_base == SDIO0_EMMCSDXC_SYSADDR)
EMMC_TRACE(" ---> for SDIO 0 Controller\n\n");
memset(p_sdhandle, 0, sizeof(struct sd_handle));
p_sdhandle->device = &emmc_global_vars_ptr->sdDevice;
p_sdhandle->card = &emmc_global_vars_ptr->sdCard;
memset(p_sdhandle->device, 0, sizeof(struct sd_dev));
memset(p_sdhandle->card, 0, sizeof(struct sd_card_info));
if (chal_sd_start((CHAL_HANDLE *) p_sdhandle->device,
SD_PIO_MODE, SDIO_base, SDIO_base) != SD_OK)
return NULL;
set_config(p_sdhandle, SD_NORMAL_SPEED, MAX_CMD_RETRY, SD_DMA_OFF,
SD_DMA_BOUNDARY_4K, EMMC_BLOCK_SIZE, EMMC_WFE_RETRY);
return &emmc_global_vars_ptr->sdHandle;
}
uint32_t sdio_idle(struct sd_handle *p_sdhandle)
{
reset_card(p_sdhandle);
SD_US_DELAY(1000);
if (init_card(p_sdhandle, SD_CARD_DETECT_MMC) != SD_OK) {
reset_card(p_sdhandle);
reset_host_ctrl(p_sdhandle);
return EMMC_BOOT_NO_CARD;
}
return EMMC_BOOT_OK;
}
/*
 * This function reads the eMMC
*/
uint32_t sdio_read(struct sd_handle *p_sdhandle,
uintptr_t mem_addr,
uintptr_t storage_addr,
size_t storage_size, size_t bytes_to_read)
{
uint32_t offset = 0, blockAddr, readLen = 0, rdCount;
uint32_t remSize, manual_copy_size;
uint8_t *outputBuf = (uint8_t *) storage_addr;
const size_t blockSize = p_sdhandle->device->cfg.blockSize;
VERBOSE("EMMC READ: dst=0x%lx, src=0x%lx, size=0x%lx\n",
storage_addr, mem_addr, bytes_to_read);
if (storage_size < bytes_to_read)
/* Don't have sufficient storage to complete the operation */
return 0;
/* Range check non high capacity memory */
if ((p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY) == 0) {
if (mem_addr > 0x80000000)
return 0;
}
/* High capacity cards use block addressing */
if (p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY) {
blockAddr = (uint32_t) (mem_addr / blockSize);
offset = (uint32_t) (mem_addr - (blockAddr * blockSize));
} else {
blockAddr = (uint32_t) (mem_addr / blockSize) * blockSize;
offset = (uint32_t) (mem_addr - blockAddr);
}
remSize = bytes_to_read;
rdCount = 0;
/* Process first unaligned block of MAX_READ_LENGTH */
if (offset > 0) {
if (!read_block(p_sdhandle, emmc_global_buf_ptr->u.tempbuf,
blockAddr, SD_MAX_READ_LENGTH)) {
if (remSize < (blockSize - offset)) {
rdCount += remSize;
manual_copy_size = remSize;
remSize = 0; /* read is done */
} else {
remSize -= (blockSize - offset);
rdCount += (blockSize - offset);
manual_copy_size = blockSize - offset;
}
/* Check for overflow */
if (manual_copy_size > storage_size ||
(((uintptr_t)outputBuf + manual_copy_size) >
(storage_addr + storage_size))) {
ERROR("EMMC READ: Overflow 1\n");
return 0;
}
memcpy(outputBuf,
(void *)((uintptr_t)
(emmc_global_buf_ptr->u.tempbuf + offset)),
manual_copy_size);
/* Update Physical address */
outputBuf += manual_copy_size;
if (p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY)
blockAddr++;
else
blockAddr += blockSize;
} else {
return 0;
}
}
while (remSize >= blockSize) {
if (remSize >= SD_MAX_BLK_TRANSFER_LENGTH)
readLen = SD_MAX_BLK_TRANSFER_LENGTH;
else
readLen = (remSize / blockSize) * blockSize;
/* Check for overflow */
if ((rdCount + readLen) > storage_size ||
(((uintptr_t) outputBuf + readLen) >
(storage_addr + storage_size))) {
ERROR("EMMC READ: Overflow\n");
return 0;
}
if (!read_block(p_sdhandle, outputBuf, blockAddr, readLen)) {
if (p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY)
blockAddr += (readLen / blockSize);
else
blockAddr += readLen;
remSize -= readLen;
rdCount += readLen;
/* Update Physical address */
outputBuf += readLen;
} else {
return 0;
}
}
/* process the last unaligned block reading */
if (remSize > 0) {
if (!read_block(p_sdhandle, emmc_global_buf_ptr->u.tempbuf,
blockAddr, SD_MAX_READ_LENGTH)) {
rdCount += remSize;
/* Check for overflow */
if (rdCount > storage_size ||
(((uintptr_t) outputBuf + remSize) >
(storage_addr + storage_size))) {
ERROR("EMMC READ: Overflow\n");
return 0;
}
memcpy(outputBuf,
emmc_global_buf_ptr->u.tempbuf, remSize);
/* Update Physical address */
outputBuf += remSize;
} else {
rdCount = 0;
}
}
return rdCount;
}
#ifdef INCLUDE_EMMC_DRIVER_WRITE_CODE
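/*
 * Write to the eMMC device. Unaligned head and tail portions are
 * handled with a read-modify-write through the temporary buffer.
 */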
static uint32_t sdio_write(struct sd_handle *p_sdhandle, uintptr_t mem_addr,
uintptr_t data_addr, size_t bytes_to_write)
{
uint32_t offset, blockAddr, writeLen, wtCount = 0;
uint32_t remSize, manual_copy_size = 0;
uint8_t *inputBuf = (uint8_t *)data_addr;
/* range check non high capacity memory */
if ((p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY) == 0) {
if (mem_addr > 0x80000000)
return 0;
}
/* high capacity cards use block addressing */
if (p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY) {
blockAddr =
(uint32_t)(mem_addr / p_sdhandle->device->cfg.blockSize);
offset =
(uint32_t)(mem_addr -
blockAddr * p_sdhandle->device->cfg.blockSize);
} else {
blockAddr =
((uint32_t)mem_addr / p_sdhandle->device->cfg.blockSize) *
p_sdhandle->device->cfg.blockSize;
offset = (uint32_t) mem_addr - blockAddr;
}
remSize = bytes_to_write;
wtCount = 0;
/* process first unaligned block */
if (offset > 0) {
if (!read_block(p_sdhandle, emmc_global_buf_ptr->u.tempbuf,
blockAddr, p_sdhandle->device->cfg.blockSize)) {
if (remSize <
(p_sdhandle->device->cfg.blockSize - offset))
manual_copy_size = remSize;
else
manual_copy_size =
p_sdhandle->device->cfg.blockSize - offset;
memcpy((void *)((uintptr_t)
(emmc_global_buf_ptr->u.tempbuf + offset)),
inputBuf,
manual_copy_size);
/* Update Physical address */
if (!write_block(p_sdhandle,
emmc_global_buf_ptr->u.tempbuf,
blockAddr,
p_sdhandle->device->cfg.blockSize)) {
if (remSize <
(p_sdhandle->device->cfg.blockSize -
offset)) {
wtCount += remSize;
manual_copy_size = remSize;
remSize = 0; /* write is done */
} else {
remSize -=
(p_sdhandle->device->cfg.blockSize -
offset);
wtCount +=
(p_sdhandle->device->cfg.blockSize -
offset);
manual_copy_size =
p_sdhandle->device->cfg.blockSize -
offset;
}
inputBuf += manual_copy_size;
if (p_sdhandle->device->ctrl.ocr &
SD_CARD_HIGH_CAPACITY)
blockAddr++;
else
blockAddr +=
p_sdhandle->device->cfg.blockSize;
} else
return 0;
} else {
return 0;
}
}
/* process block writing */
while (remSize >= p_sdhandle->device->cfg.blockSize) {
if (remSize >= SD_MAX_READ_LENGTH) {
writeLen = SD_MAX_READ_LENGTH;
} else {
writeLen =
(remSize / p_sdhandle->device->cfg.blockSize) *
p_sdhandle->device->cfg.blockSize;
}
if (!write_block(p_sdhandle, inputBuf, blockAddr, writeLen)) {
if (p_sdhandle->device->ctrl.ocr & SD_CARD_HIGH_CAPACITY)
blockAddr +=
(writeLen /
p_sdhandle->device->cfg.blockSize);
else
blockAddr += writeLen;
remSize -= writeLen;
wtCount += writeLen;
inputBuf += writeLen;
} else {
return 0;
}
}
/* process the last unaligned block write (read-modify-write) */
if (remSize > 0) {
if (!read_block(p_sdhandle,
emmc_global_buf_ptr->u.tempbuf,
blockAddr, p_sdhandle->device->cfg.blockSize)) {
memcpy(emmc_global_buf_ptr->u.tempbuf,
inputBuf, remSize);
/* Update Physical address */
if (!write_block(p_sdhandle,
emmc_global_buf_ptr->u.tempbuf,
blockAddr,
p_sdhandle->device->cfg.blockSize)) {
wtCount += remSize;
inputBuf += remSize;
} else {
return 0;
}
} else {
wtCount = 0;
}
}
return wtCount;
}
#endif
/*
* Function to put the card in Ready state by sending CMD0 and CMD1
*/
static int32_t bcm_emmc_card_ready_state(struct sd_handle *p_sdhandle)
{
int32_t result = 0;
uint32_t argument = MMC_CMD_IDLE_RESET_ARG; /* Exit from Boot mode */
if (p_sdhandle) {
send_sdio_cmd(SD_CMD_GO_IDLE_STATE, argument, 0, NULL);
result = reset_card(p_sdhandle);
if (result != SD_OK) {
EMMC_TRACE("eMMC Reset error\n");
return SD_RESET_ERROR;
}
SD_US_DELAY(2000);
result = mmc_cmd1(p_sdhandle);
}
return result;
}
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <drivers/gpio.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>
#include <iproc_gpio.h>
#include <platform_def.h>
#define IPROC_GPIO_DATA_IN_OFFSET 0x00
#define IPROC_GPIO_DATA_OUT_OFFSET 0x04
#define IPROC_GPIO_OUT_EN_OFFSET 0x08
#define IPROC_GPIO_PAD_RES_OFFSET 0x34
#define IPROC_GPIO_RES_EN_OFFSET 0x38
#define PINMUX_OFFSET(gpio) ((gpio) * 4)
#define PINCONF_OFFSET(gpio) ((gpio) * 4)
#define PINCONF_PULL_UP BIT(4)
#define PINCONF_PULL_DOWN BIT(5)
/*
 * Each iProc GPIO bank occupies 0x200 bytes of register space and
 * supports 32 GPIOs.
 */
#define GPIO_BANK_SIZE 0x200
#define NGPIOS_PER_BANK 32
#define GPIO_BANK(pin) ((pin) / NGPIOS_PER_BANK)
#define IPROC_GPIO_REG(pin, reg) (GPIO_BANK(pin) * GPIO_BANK_SIZE + (reg))
#define IPROC_GPIO_SHIFT(pin) ((pin) % NGPIOS_PER_BANK)
#define MUX_GPIO_MODE 0x3
/*
* @base: base address of the gpio controller
* @pinconf_base: base address of the pinconf
* @pinmux_base: base address of the mux controller
 * @nr_gpios: maximum number of GPIOs
*/
struct iproc_gpio {
uintptr_t base;
uintptr_t pinconf_base;
uintptr_t pinmux_base;
int nr_gpios;
};
static struct iproc_gpio iproc_gpio;
static void gpio_set_bit(uintptr_t base, unsigned int reg, int gpio, bool set)
{
unsigned int offset = IPROC_GPIO_REG(gpio, reg);
unsigned int shift = IPROC_GPIO_SHIFT(gpio);
uint32_t val;
val = mmio_read_32(base + offset);
if (set)
val |= BIT(shift);
else
val &= ~BIT(shift);
mmio_write_32(base + offset, val);
}
static bool gpio_get_bit(uintptr_t base, unsigned int reg, int gpio)
{
unsigned int offset = IPROC_GPIO_REG(gpio, reg);
unsigned int shift = IPROC_GPIO_SHIFT(gpio);
return !!(mmio_read_32(base + offset) & BIT(shift));
}
static void mux_to_gpio(struct iproc_gpio *g, int gpio)
{
/* mux pad to GPIO if IOPAD configuration is mandatory */
if (g->pinmux_base)
mmio_write_32(g->pinmux_base + PINMUX_OFFSET(gpio),
MUX_GPIO_MODE);
}
static void set_direction(int gpio, int direction)
{
struct iproc_gpio *g = &iproc_gpio;
bool dir = (direction == GPIO_DIR_OUT) ? true : false;
assert(gpio < g->nr_gpios);
mux_to_gpio(g, gpio);
gpio_set_bit(g->base, IPROC_GPIO_OUT_EN_OFFSET, gpio, dir);
}
static int get_direction(int gpio)
{
struct iproc_gpio *g = &iproc_gpio;
int dir;
assert(gpio < g->nr_gpios);
mux_to_gpio(g, gpio);
dir = gpio_get_bit(g->base, IPROC_GPIO_OUT_EN_OFFSET, gpio) ?
GPIO_DIR_OUT : GPIO_DIR_IN;
return dir;
}
static int get_value(int gpio)
{
struct iproc_gpio *g = &iproc_gpio;
unsigned int offset;
assert(gpio < g->nr_gpios);
mux_to_gpio(g, gpio);
/*
* If GPIO is configured as output, read from the GPIO_OUT register;
* otherwise, read from the GPIO_IN register
*/
offset = gpio_get_bit(g->base, IPROC_GPIO_OUT_EN_OFFSET, gpio) ?
IPROC_GPIO_DATA_OUT_OFFSET : IPROC_GPIO_DATA_IN_OFFSET;
return gpio_get_bit(g->base, offset, gpio);
}
static void set_value(int gpio, int val)
{
struct iproc_gpio *g = &iproc_gpio;
assert(gpio < g->nr_gpios);
mux_to_gpio(g, gpio);
/* make sure GPIO is configured to output, and then set the value */
gpio_set_bit(g->base, IPROC_GPIO_OUT_EN_OFFSET, gpio, true);
gpio_set_bit(g->base, IPROC_GPIO_DATA_OUT_OFFSET, gpio, !!(val));
}
static int get_pull(int gpio)
{
struct iproc_gpio *g = &iproc_gpio;
uint32_t val;
assert(gpio < g->nr_gpios);
mux_to_gpio(g, gpio);
/* when there's a valid pinconf_base, use it */
if (g->pinconf_base) {
val = mmio_read_32(g->pinconf_base + PINCONF_OFFSET(gpio));
if (val & PINCONF_PULL_UP)
return GPIO_PULL_UP;
else if (val & PINCONF_PULL_DOWN)
return GPIO_PULL_DOWN;
else
return GPIO_PULL_NONE;
}
/* no pinconf_base. fall back to GPIO internal pull control */
if (!gpio_get_bit(g->base, IPROC_GPIO_RES_EN_OFFSET, gpio))
return GPIO_PULL_NONE;
return gpio_get_bit(g->base, IPROC_GPIO_PAD_RES_OFFSET, gpio) ?
GPIO_PULL_UP : GPIO_PULL_DOWN;
}
static void set_pull(int gpio, int pull)
{
struct iproc_gpio *g = &iproc_gpio;
uint32_t val;
assert(gpio < g->nr_gpios);
mux_to_gpio(g, gpio);
/* when there's a valid pinconf_base, use it */
if (g->pinconf_base) {
val = mmio_read_32(g->pinconf_base + PINCONF_OFFSET(gpio));
if (pull == GPIO_PULL_NONE) {
val &= ~(PINCONF_PULL_UP | PINCONF_PULL_DOWN);
} else if (pull == GPIO_PULL_UP) {
val |= PINCONF_PULL_UP;
val &= ~PINCONF_PULL_DOWN;
} else if (pull == GPIO_PULL_DOWN) {
val |= PINCONF_PULL_DOWN;
val &= ~PINCONF_PULL_UP;
} else {
return;
}
mmio_write_32(g->pinconf_base + PINCONF_OFFSET(gpio), val);
}
/* no pinconf_base. fall back to GPIO internal pull control */
if (pull == GPIO_PULL_NONE) {
gpio_set_bit(g->base, IPROC_GPIO_RES_EN_OFFSET, gpio, false);
return;
}
/* enable pad register and pull up or down */
gpio_set_bit(g->base, IPROC_GPIO_RES_EN_OFFSET, gpio, true);
gpio_set_bit(g->base, IPROC_GPIO_PAD_RES_OFFSET, gpio,
!!(pull == GPIO_PULL_UP));
}
const gpio_ops_t iproc_gpio_ops = {
.get_direction = get_direction,
.set_direction = set_direction,
.get_value = get_value,
.set_value = set_value,
.get_pull = get_pull,
.set_pull = set_pull,
};
void iproc_gpio_init(uintptr_t base, int nr_gpios, uintptr_t pinmux_base,
uintptr_t pinconf_base)
{
iproc_gpio.base = base;
iproc_gpio.nr_gpios = nr_gpios;
/* pinmux/pinconf base is optional for some SoCs */
if (pinmux_base)
iproc_gpio.pinmux_base = pinmux_base;
if (pinconf_base)
iproc_gpio.pinconf_base = pinconf_base;
gpio_init(&iproc_gpio_ops);
}
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdint.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <ocotp.h>
#include <platform_def.h>
#define OTP_MAP 2
#define OTP_NUM_WORDS 2048
/*
* # of tries for OTP Status. The time to execute a command varies. The slowest
* commands are writes which also vary based on the # of bits turned on. Writing
* 0xffffffff takes ~3800 us.
*/
#define OTPC_RETRIES_US 5000
/* Sequence to enable OTP program */
#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd }
/* OTPC Commands */
#define OTPC_CMD_READ 0x0
#define OTPC_CMD_OTP_PROG_ENABLE 0x2
#define OTPC_CMD_OTP_PROG_DISABLE 0x3
#define OTPC_CMD_PROGRAM 0x8
#define OTPC_CMD_ECC 0x10
#define OTPC_ECC_ADDR 0x1A
#define OTPC_ECC_VAL 0x00EC0000
/* OTPC Status Bits */
#define OTPC_STAT_CMD_DONE BIT(1)
#define OTPC_STAT_PROG_OK BIT(2)
/* OTPC register definition */
#define OTPC_MODE_REG_OFFSET 0x0
#define OTPC_MODE_REG_OTPC_MODE 0
#define OTPC_COMMAND_OFFSET 0x4
#define OTPC_COMMAND_COMMAND_WIDTH 6
#define OTPC_CMD_START_OFFSET 0x8
#define OTPC_CMD_START_START 0
#define OTPC_CPU_STATUS_OFFSET 0xc
#define OTPC_CPUADDR_REG_OFFSET 0x28
#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
#define OTPC_CPU_WRITE_REG_OFFSET 0x2c
#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
#define OTPC_MODE_REG OCOTP_REGS_BASE
struct chip_otp_cfg {
uint32_t base;
uint32_t num_words;
};
struct chip_otp_cfg ocotp_cfg = {
.base = OTPC_MODE_REG,
.num_words = 2048,
};
struct otpc_priv {
uint32_t base;
struct otpc_map *map;
int size;
int state;
};
struct otpc_priv otpc_info;
static inline void set_command(uint32_t base, uint32_t command)
{
mmio_write_32(base + OTPC_COMMAND_OFFSET, command & OTPC_CMD_MASK);
}
static inline void set_cpu_address(uint32_t base, uint32_t addr)
{
mmio_write_32(base + OTPC_CPUADDR_REG_OFFSET, addr & OTPC_ADDR_MASK);
}
static inline void set_start_bit(uint32_t base)
{
mmio_write_32(base + OTPC_CMD_START_OFFSET, 1 << OTPC_CMD_START_START);
}
static inline void reset_start_bit(uint32_t base)
{
mmio_write_32(base + OTPC_CMD_START_OFFSET, 0);
}
static inline void write_cpu_data(uint32_t base, uint32_t value)
{
mmio_write_32(base + OTPC_CPU_WRITE_REG_OFFSET, value);
}
static int poll_cpu_status(uint32_t base, uint32_t value)
{
uint32_t status;
uint32_t retries;
for (retries = 0; retries < OTPC_RETRIES_US; retries++) {
status = mmio_read_32(base + OTPC_CPU_STATUS_OFFSET);
if (status & value)
break;
udelay(1);
}
if (retries == OTPC_RETRIES_US)
return -1;
return 0;
}
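/*
 * Enable or disable ECC for subsequent OTP controller operations.
 */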
static int bcm_otpc_ecc(uint32_t enable)
{
struct otpc_priv *priv = &otpc_info;
int ret;
set_command(priv->base, OTPC_CMD_ECC);
set_cpu_address(priv->base, OTPC_ECC_ADDR);
if (!enable)
write_cpu_data(priv->base, OTPC_ECC_VAL);
else
write_cpu_data(priv->base, ~OTPC_ECC_VAL);
set_start_bit(priv->base);
ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
if (ret) {
ERROR("otp ecc op error: 0x%x", ret);
return -1;
}
reset_start_bit(priv->base);
return 0;
}
/*
 * bcm_otpc_read reads OTP data in 8-byte rows; bytes must be a
 * multiple of 8.
 * Returns -1 on error, or the number of bytes read on success.
 */
int bcm_otpc_read(unsigned int offset, void *val, uint32_t bytes,
uint32_t ecc_flag)
{
struct otpc_priv *priv = &otpc_info;
uint32_t *buf = val;
uint32_t bytes_read;
uint32_t address = offset / priv->map->word_size;
int i, ret;
if (!priv->state) {
ERROR("OCOTP read failed\n");
return -1;
}
bcm_otpc_ecc(ecc_flag);
for (bytes_read = 0; (bytes_read + priv->map->word_size) <= bytes;) {
set_command(priv->base, OTPC_CMD_READ);
set_cpu_address(priv->base, address++);
set_start_bit(priv->base);
ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
if (ret) {
ERROR("otp read error: 0x%x", ret);
return -1;
}
for (i = 0; i < priv->map->otpc_row_size; i++) {
*buf++ = mmio_read_32(priv->base +
priv->map->data_r_offset[i]);
bytes_read += sizeof(*buf);
}
reset_start_bit(priv->base);
}
return bytes_read;
}
int bcm_otpc_init(struct otpc_map *map)
{
struct otpc_priv *priv;
priv = &otpc_info;
priv->base = ocotp_cfg.base;
priv->map = map;
priv->size = 4 * ocotp_cfg.num_words;
/* Enable CPU access to OTPC. */
mmio_setbits_32(priv->base + OTPC_MODE_REG_OFFSET,
BIT(OTPC_MODE_REG_OTPC_MODE));
reset_start_bit(priv->base);
priv->state = 1;
VERBOSE("OTPC Initialization done\n");
return 0;
}
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <arch_helpers.h>
#include <common/debug.h>
/* MCU binary image structure: <header> <data>
*
* Header structure:
* <magic-start>
* <num-sections>
* {<src-offset> <src-size> <dst-addr>}*
* <magic-end>
*
* MCU data (<data>) consists of several sections of code/data, to be
* installed (copied) into MCU memories.
* Header (<header>) gives information about sections contained in <data>.
*
* The installer code iterates over sections in MCU binary.
* For each section, it copies the section into MCU memory.
*
* The header contains:
* - <magic-start> - 32-bit magic number to mark header start
* - <num-sections> - number of sections in <data>
* - <num-sections> tuples. Each tuple describes a section.
* A tuple contains three 32-bit words.
* - <magic-end> - 32-bit magic number to mark header end
*
 * Each section is described by a tuple consisting of three 32-bit words:
* - offset of section within MCU binary (relative to beginning of <data>)
* - section size (in bytes) in MCU binary
* - target address (in MCU memory). Section is copied to this location.
*
* All fields are 32-bit unsigned integers in little endian format.
* All sizes are assumed to be 32-bit aligned.
*/
#define SCP_BIN_HEADER_MAGIC_START 0xfa587D01
#define SCP_BIN_HEADER_MAGIC_END 0xf3e06a85
int download_scp_patch(void *image, unsigned int image_size)
{
unsigned int *pheader = (unsigned int *)(image);
unsigned int header_size;
unsigned char *pdata;
void *dest;
unsigned int num_sections;
unsigned int section_src_offset;
unsigned int section_size;
if ((pheader == NULL) || (pheader[0] != SCP_BIN_HEADER_MAGIC_START)) {
ERROR("SCP: Could not find SCP header.\n");
return -1;
}
num_sections = pheader[1];
INFO("...Number of sections: %d\n", num_sections);
header_size = 4 * (1 + 1 + 3 * num_sections + 1);
if (image_size < header_size) {
ERROR("SCP: Wrong size.\n");
return -1;
}
if (*(pheader + header_size/4 - 1) != SCP_BIN_HEADER_MAGIC_END) {
ERROR("SCP: Could not find SCP footer.\n");
return -1;
}
VERBOSE("SCP image header validated successfully\n");
pdata = (unsigned char *)pheader + header_size;
for (pheader += 2; num_sections > 0; num_sections--) {
section_src_offset = pheader[0];
section_size = pheader[1];
dest = (void *)(unsigned long)pheader[2];
INFO("section: src:0x%x, size:%d, dst:0x%x\n",
section_src_offset, section_size, pheader[2]);
if ((section_src_offset + section_size) > image_size) {
ERROR("SCP: Section points to outside of patch.\n");
return -1;
}
/* copy from source to target section */
memcpy(dest, pdata + section_src_offset, section_size);
flush_dcache_range((uintptr_t)dest, section_size);
/* next section */
pheader += 3;
}
return 0;
}
/*
* Copyright (c) 2016-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <sotp.h>
#include <platform_def.h>
#include <platform_sotp.h>
#ifdef USE_SOFT_SOTP
extern uint64_t soft_sotp[];
#endif
#define SOTP_PROG_CONTROL (SOTP_REGS_OTP_BASE + 0x0000)
#define SOTP_PROG_CONTROL__OTP_CPU_MODE_EN 15
#define SOTP_PROG_CONTROL__OTP_DISABLE_ECC 9
#define SOTP_PROG_CONTROL__OTP_ECC_WREN 8
#define SOTP_WRDATA_0 (SOTP_REGS_OTP_BASE + 0x0004)
#define SOTP_WRDATA_1 (SOTP_REGS_OTP_BASE + 0x0008)
#define SOTP_ADDR (SOTP_REGS_OTP_BASE + 0x000c)
#define SOTP_ADDR__OTP_ROW_ADDR_R 6
#define SOTP_ADDR_MASK 0x3FF
#define SOTP_CTRL_0 (SOTP_REGS_OTP_BASE + 0x0010)
#define SOTP_CTRL_0__START 0
#define SOTP_CTRL_0__OTP_CMD 1
#define SOTP_STATUS_0 (SOTP_REGS_OTP_BASE + 0x0018)
#define SOTP_STATUS__FDONE 3
#define SOTP_STATUS_1 (SOTP_REGS_OTP_BASE + 0x001c)
#define SOTP_STATUS_1__CMD_DONE 1
#define SOTP_STATUS_1__ECC_DET 17
#define SOTP_RDDATA_0 (SOTP_REGS_OTP_BASE + 0x0020)
#define SOTP_RDDATA_1 (SOTP_REGS_OTP_BASE + 0x0024)
#define SOTP_READ 0
#define SOTP_PROG_WORD 10
#define SOTP_STATUS__PROGOK 2
#define SOTP_PROG_ENABLE 2
#define SOTP_ROW_DATA_MASK 0xffffffff
#define SOTP_ECC_ERR_BITS_MASK 0x1ff00000000
#define SOTP_CHIP_CTRL_SW_OVERRIDE_CHIP_STATES 4
#define SOTP_CHIP_CTRL_SW_MANU_PROG 5
#define SOTP_CHIP_CTRL_SW_CID_PROG 6
#define SOTP_CHIP_CTRL_SW_AB_DEVICE 8
#define SOTP_CHIP_CTRL_SW_AB_DEV_MODE 9
#define CHIP_STATE_UNPROGRAMMED 0x1
#define CHIP_STATE_UNASSIGNED 0x2
uint64_t sotp_mem_read(uint32_t offset, uint32_t sotp_add_ecc)
{
#ifdef USE_SOFT_SOTP
(void)sotp_add_ecc;
return soft_sotp[offset];
#else
uint64_t read_data = 0;
uint64_t read_data1 = 0;
uint64_t read_data2 = 0;
/* Check for FDONE status */
while ((mmio_read_32(SOTP_STATUS_0) & BIT(SOTP_STATUS__FDONE)) !=
BIT(SOTP_STATUS__FDONE))
;
/* Enable OTP access by CPU */
mmio_setbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_CPU_MODE_EN));
if (sotp_add_ecc == 1) {
mmio_clrbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_DISABLE_ECC));
}
if (sotp_add_ecc == 0) {
mmio_setbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_DISABLE_ECC));
}
mmio_write_32(SOTP_ADDR,
((offset & SOTP_ADDR_MASK) << SOTP_ADDR__OTP_ROW_ADDR_R));
mmio_write_32(SOTP_CTRL_0, (SOTP_READ << SOTP_CTRL_0__OTP_CMD));
/* Start bit to tell SOTP to send command to the OTP controller */
mmio_setbits_32(SOTP_CTRL_0, BIT(SOTP_CTRL_0__START));
/* Wait for SOTP command done to be set */
while ((mmio_read_32(SOTP_STATUS_1) & BIT(SOTP_STATUS_1__CMD_DONE)) !=
BIT(SOTP_STATUS_1__CMD_DONE))
;
/* Clr Start bit after command done */
mmio_clrbits_32(SOTP_CTRL_0, BIT(SOTP_CTRL_0__START));
if ((offset > SOTP_DEVICE_SECURE_CFG3_ROW) &&
(mmio_read_32(SOTP_STATUS_1) & BIT(SOTP_STATUS_1__ECC_DET))) {
ERROR("SOTP ECC ERROR Detected row offset %d\n", offset);
read_data = SOTP_ECC_ERR_DETECT;
} else {
read_data1 = (uint64_t)mmio_read_32(SOTP_RDDATA_0);
read_data1 = read_data1 & 0xFFFFFFFF;
read_data2 = (uint64_t)mmio_read_32(SOTP_RDDATA_1);
read_data2 = (read_data2 & 0x1ff) << 32;
read_data = read_data1 | read_data2;
}
/* Command done is cleared */
mmio_setbits_32(SOTP_STATUS_1, BIT(SOTP_STATUS_1__CMD_DONE));
/* disable OTP access by CPU */
mmio_clrbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_CPU_MODE_EN));
return read_data;
#endif
}
void sotp_mem_write(uint32_t addr, uint32_t sotp_add_ecc, uint64_t wdata)
{
#ifdef USE_SOFT_SOTP
(void)sotp_add_ecc;
soft_sotp[addr] = wdata;
#else
uint32_t loop;
uint8_t prog_array[4] = { 0x0F, 0x04, 0x08, 0x0D };
uint32_t chip_state_default =
(CHIP_STATE_UNASSIGNED|CHIP_STATE_UNPROGRAMMED);
uint32_t chip_state = mmio_read_32(SOTP_REGS_SOTP_CHIP_STATES);
uint32_t chip_ctrl_default = 0;
/*
 * The override settings are required to allow the customer to program
 * the application-specific keys into SOTP before the conversion to
 * one of the AB modes.
 * At the end of the write operation, the chip ctrl settings are
 * restored to the state prior to the write call.
 */
if (chip_state & chip_state_default) {
uint32_t chip_ctrl;
chip_ctrl_default = mmio_read_32(SOTP_CHIP_CTRL);
INFO("SOTP: enable special prog mode\n");
chip_ctrl = BIT(SOTP_CHIP_CTRL_SW_OVERRIDE_CHIP_STATES) |
BIT(SOTP_CHIP_CTRL_SW_MANU_PROG) |
BIT(SOTP_CHIP_CTRL_SW_CID_PROG) |
BIT(SOTP_CHIP_CTRL_SW_AB_DEVICE);
mmio_write_32(SOTP_CHIP_CTRL, chip_ctrl);
}
/* Check for FDONE status */
while ((mmio_read_32(SOTP_STATUS_0) & BIT(SOTP_STATUS__FDONE)) !=
BIT(SOTP_STATUS__FDONE))
;
/* Enable OTP access by CPU */
mmio_setbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_CPU_MODE_EN));
if (addr > SOTP_DEVICE_SECURE_CFG3_ROW) {
if (sotp_add_ecc == 0) {
mmio_clrbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_ECC_WREN));
}
if (sotp_add_ecc == 1) {
mmio_setbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_ECC_WREN));
}
} else {
mmio_clrbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_ECC_WREN));
}
mmio_write_32(SOTP_CTRL_0, (SOTP_PROG_ENABLE << 1));
/*
* In order to avoid unintentional writes / programming of the OTP
* array, the OTP Controller must be put into programming mode before
* it will accept program commands. This is done by writing 0xF, 0x4,
* 0x8, 0xD with program commands prior to starting the actual
* programming sequence
*/
for (loop = 0; loop < 4; loop++) {
mmio_write_32(SOTP_WRDATA_0, prog_array[loop]);
/*
* Start bit to tell SOTP to send command to the OTP controller
*/
mmio_setbits_32(SOTP_CTRL_0, BIT(SOTP_CTRL_0__START));
/* Wait for SOTP command done to be set */
while ((mmio_read_32(SOTP_STATUS_1) &
BIT(SOTP_STATUS_1__CMD_DONE)) !=
BIT(SOTP_STATUS_1__CMD_DONE))
;
/* Command done is cleared w1c */
mmio_setbits_32(SOTP_STATUS_1, BIT(SOTP_STATUS_1__CMD_DONE));
/* Clr Start bit after command done */
mmio_clrbits_32(SOTP_CTRL_0, BIT(SOTP_CTRL_0__START));
}
/* Check for PROGOK */
while ((mmio_read_32(SOTP_STATUS_0) & 0x4) != BIT(SOTP_STATUS__PROGOK))
;
/* Set 10 bit row address */
mmio_write_32(SOTP_ADDR,
((addr & SOTP_ADDR_MASK) << SOTP_ADDR__OTP_ROW_ADDR_R));
/* Set SOTP Row data */
mmio_write_32(SOTP_WRDATA_0, (wdata & SOTP_ROW_DATA_MASK));
/* Set SOTP ECC and error bits */
mmio_write_32(SOTP_WRDATA_1, ((wdata & SOTP_ECC_ERR_BITS_MASK) >> 32));
/* Set prog_word command */
mmio_write_32(SOTP_CTRL_0, (SOTP_PROG_WORD << 1));
/* Start bit to tell SOTP to send command to the OTP controller */
mmio_setbits_32(SOTP_CTRL_0, BIT(SOTP_CTRL_0__START));
/* Wait for SOTP command done to be set */
while ((mmio_read_32(SOTP_STATUS_1) & BIT(SOTP_STATUS_1__CMD_DONE)) !=
BIT(SOTP_STATUS_1__CMD_DONE))
;
/* Command done is cleared w1c */
mmio_setbits_32(SOTP_STATUS_1, BIT(SOTP_STATUS_1__CMD_DONE));
/* disable OTP access by CPU */
mmio_clrbits_32(SOTP_PROG_CONTROL,
BIT(SOTP_PROG_CONTROL__OTP_CPU_MODE_EN));
/* Clr Start bit after command done */
mmio_clrbits_32(SOTP_CTRL_0, BIT(SOTP_CTRL_0__START));
if (chip_state & chip_state_default)
mmio_write_32(SOTP_CHIP_CTRL, chip_ctrl_default);
#endif
}
int sotp_read_key(uint8_t *key, size_t keysize, int start_row, int end_row)
{
int row;
uint32_t status = 0;
uint32_t status2 = 0xFFFFFFFF;
uint64_t row_data;
uint32_t data;
uint32_t *temp_key = (uint32_t *)key;
row = start_row;
while ((keysize > 0) && (row <= end_row)) {
row_data = sotp_mem_read(row, SOTP_ROW_ECC);
if (!(row_data & (SOTP_ECC_ERR_DETECT | SOTP_FAIL_BITS))) {
memcpy(temp_key++, &row_data, sizeof(uint32_t));
keysize -= sizeof(uint32_t);
data = (uint32_t)(row_data & SOTP_ROW_DATA_MASK);
status |= data;
status2 &= data;
}
row++;
}
if ((status2 == 0xFFFFFFFF) || (status == 0) || (row > end_row))
return -1;
return 0;
}
int sotp_key_erased(void)
{
uint64_t row_data;
int status = 0;
row_data = sotp_mem_read(SOTP_DEVICE_SECURE_CFG0_ROW, 0);
if (row_data & SOTP_DEVICE_SECURE_CFG0_OTP_ERASED_MASK)
status = 1;
else if (mmio_read_32(SOTP_REGS_SOTP_CHIP_STATES) &
SOTP_REGS_SOTP_CHIP_STATES_OTP_ERASED_MASK)
status = 1;
return status;
}
/*
 * This function optimises the SOTP redundancy by treating the bit pair
 * 00 as zero and the pairs 01, 10 and 11 as one.
 */
uint32_t sotp_redundancy_reduction(uint32_t sotp_row_data)
{
uint32_t opt_data;
uint32_t opt_loop;
uint32_t temp_data;
opt_data = 0;
for (opt_loop = 0; opt_loop < 16; opt_loop = opt_loop + 1) {
temp_data = ((sotp_row_data >> (opt_loop * 2)) & 0x3);
if (temp_data != 0x0)
opt_data = (opt_data | (1 << opt_loop));
}
return opt_data;
}
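As a quick illustration of the mapping implemented above, here is a minimal, standalone model of the pair-collapsing rule (00 maps to 0, any other pair to 1) with a couple of checked values; it is a sanity sketch, not part of the driver.

#include <assert.h>
#include <stdint.h>

/* Standalone model of sotp_redundancy_reduction(): each 2-bit pair of the
 * 32-bit raw row collapses to one bit, giving a 16-bit result. */
static uint32_t redundancy_model(uint32_t row)
{
	uint32_t out = 0;
	int i;

	for (i = 0; i < 16; i++)
		if ((row >> (2 * i)) & 0x3)
			out |= 1U << i;
	return out;
}

int main(void)
{
	/* 0xD = pairs 01 and 11 in the two lowest positions -> 0x3 */
	assert(redundancy_model(0x0000000DU) == 0x3U);
	/* every pair is 10 -> all 16 output bits set */
	assert(redundancy_model(0xAAAAAAAAU) == 0xFFFFU);
	return 0;
}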
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <endian.h>
#include <lib/mmio.h>
#include <platform_def.h>
#include <spi.h>
#include "iproc_qspi.h"
struct bcmspi_priv spi_cfg;
/* Default stub; a platform may redefine this to set up clocks and adjust max_hz */
#pragma weak plat_spi_init
int plat_spi_init(uint32_t *max_hz)
{
return 0;
}
/* Initialize & setup iproc qspi controller */
int iproc_qspi_setup(uint32_t bus, uint32_t cs, uint32_t max_hz, uint32_t mode)
{
struct bcmspi_priv *priv = NULL;
uint32_t spbr;
priv = &spi_cfg;
priv->spi_mode = mode;
priv->state = QSPI_STATE_DISABLED;
priv->bspi_hw = QSPI_BSPI_MODE_REG_BASE;
priv->mspi_hw = QSPI_MSPI_MODE_REG_BASE;
/* Initialize clock and other platform-specific settings */
if (plat_spi_init(&max_hz) != 0)
return -1;
priv->max_hz = max_hz;
/* MSPI: Basic hardware initialization */
mmio_write_32(priv->mspi_hw + MSPI_SPCR1_LSB_REG, 0);
mmio_write_32(priv->mspi_hw + MSPI_SPCR1_MSB_REG, 0);
mmio_write_32(priv->mspi_hw + MSPI_NEWQP_REG, 0);
mmio_write_32(priv->mspi_hw + MSPI_ENDQP_REG, 0);
mmio_write_32(priv->mspi_hw + MSPI_SPCR2_REG, 0);
/* MSPI: SCK configuration */
spbr = (QSPI_AXI_CLK - 1) / (2 * priv->max_hz) + 1;
spbr = MIN(spbr, SPBR_DIV_MAX);
spbr = MAX(spbr, SPBR_DIV_MIN);
mmio_write_32(priv->mspi_hw + MSPI_SPCR0_LSB_REG, spbr);
/* MSPI: Mode configuration (8 bits by default) */
priv->mspi_16bit = 0;
mmio_write_32(priv->mspi_hw + MSPI_SPCR0_MSB_REG,
BIT(MSPI_SPCR0_MSB_REG_MSTR_SHIFT) | /* Master */
MSPI_SPCR0_MSB_REG_16_BITS_PER_WD_SHIFT | /* 16 bits per word */
(priv->spi_mode & MSPI_SPCR0_MSB_REG_MODE_MASK)); /* mode: CPOL / CPHA */
/* Display bus info */
VERBOSE("SPI: SPCR0_LSB: 0x%x\n",
mmio_read_32(priv->mspi_hw + MSPI_SPCR0_LSB_REG));
VERBOSE("SPI: SPCR0_MSB: 0x%x\n",
mmio_read_32(priv->mspi_hw + MSPI_SPCR0_MSB_REG));
VERBOSE("SPI: SPCR1_LSB: 0x%x\n",
mmio_read_32(priv->mspi_hw + MSPI_SPCR1_LSB_REG));
VERBOSE("SPI: SPCR1_MSB: 0x%x\n",
mmio_read_32(priv->mspi_hw + MSPI_SPCR1_MSB_REG));
VERBOSE("SPI: SPCR2: 0x%x\n",
mmio_read_32(priv->mspi_hw + MSPI_SPCR2_REG));
VERBOSE("SPI: CLK: %d\n", priv->max_hz);
return 0;
}
void bcmspi_enable_bspi(struct bcmspi_priv *priv)
{
if (priv->state != QSPI_STATE_BSPI) {
/* Switch to BSPI */
mmio_write_32(priv->bspi_hw + BSPI_MAST_N_BOOT_CTRL_REG, 0);
priv->state = QSPI_STATE_BSPI;
}
}
static int bcmspi_disable_bspi(struct bcmspi_priv *priv)
{
uint32_t retry;
if (priv->state == QSPI_STATE_MSPI)
return 0;
/* Switch to MSPI if not yet */
if ((mmio_read_32(priv->bspi_hw + BSPI_MAST_N_BOOT_CTRL_REG) &
MSPI_CTRL_MASK) == 0) {
retry = QSPI_RETRY_COUNT_US_MAX;
do {
if ((mmio_read_32(
priv->bspi_hw + BSPI_BUSY_STATUS_REG) &
BSPI_BUSY_MASK) == 0) {
mmio_write_32(priv->bspi_hw +
BSPI_MAST_N_BOOT_CTRL_REG,
MSPI_CTRL_MASK);
udelay(1);
break;
}
udelay(1);
} while (retry--);
if ((mmio_read_32(priv->bspi_hw + BSPI_MAST_N_BOOT_CTRL_REG) &
MSPI_CTRL_MASK) != MSPI_CTRL_MASK) {
ERROR("QSPI: Switching to QSPI error.\n");
return -1;
}
}
/* Update state */
priv->state = QSPI_STATE_MSPI;
return 0;
}
int iproc_qspi_claim_bus(void)
{
struct bcmspi_priv *priv = &spi_cfg;
/* Switch to MSPI by default */
if (bcmspi_disable_bspi(priv) != 0)
return -1;
return 0;
}
void iproc_qspi_release_bus(void)
{
struct bcmspi_priv *priv = &spi_cfg;
/* Switch to BSPI by default */
bcmspi_enable_bspi(priv);
}
static int mspi_xfer(struct bcmspi_priv *priv, uint32_t bytes,
const uint8_t *tx, uint8_t *rx, uint32_t flag)
{
uint32_t retry;
uint32_t mode = CDRAM_PCS0;
if (flag & SPI_XFER_QUAD) {
mode |= CDRAM_QUAD_MODE;
VERBOSE("SPI: QUAD mode\n");
if (!tx) {
VERBOSE("SPI: 4 lane input\n");
mode |= CDRAM_RBIT_INPUT;
}
}
/* Use 8-bit queue for odd-bytes transfer */
if (bytes & 1)
priv->mspi_16bit = 0;
else {
priv->mspi_16bit = 1;
mode |= CDRAM_BITS_EN;
}
while (bytes) {
uint32_t chunk;
uint32_t queues;
uint32_t i;
/* Separate code for 16bit and 8bit transfers for performance */
if (priv->mspi_16bit) {
VERBOSE("SPI: 16 bits xfer\n");
/* Determine how many bytes to process this time */
chunk = MIN(bytes, NUM_CDRAM_BYTES * 2);
queues = (chunk - 1) / 2 + 1;
bytes -= chunk;
/* Fill CDRAMs */
for (i = 0; i < queues; i++)
mmio_write_32(priv->mspi_hw + MSPI_CDRAM_REG +
(i << 2), mode | CDRAM_CONT);
/* Fill TXRAMs */
for (i = 0; i < chunk; i++)
if (tx)
mmio_write_32(priv->mspi_hw +
MSPI_TXRAM_REG +
(i << 2), tx[i]);
} else {
VERBOSE("SPI: 8 bits xfer\n");
/* Determine how many bytes to process this time */
chunk = MIN(bytes, NUM_CDRAM_BYTES);
queues = chunk;
bytes -= chunk;
/* Fill CDRAMs and TXRAMS */
for (i = 0; i < chunk; i++) {
mmio_write_32(priv->mspi_hw + MSPI_CDRAM_REG +
(i << 2), mode | CDRAM_CONT);
if (tx)
mmio_write_32(priv->mspi_hw +
MSPI_TXRAM_REG +
(i << 3), tx[i]);
}
}
/* Advance pointers */
if (tx)
tx += chunk;
/* Setup queue pointers */
mmio_write_32(priv->mspi_hw + MSPI_NEWQP_REG, 0);
mmio_write_32(priv->mspi_hw + MSPI_ENDQP_REG, queues - 1);
/* Remove CONT on the last byte command */
if (bytes == 0 && (flag & SPI_XFER_END))
mmio_write_32(priv->mspi_hw + MSPI_CDRAM_REG +
((queues - 1) << 2), mode);
/* Kick off */
mmio_write_32(priv->mspi_hw + MSPI_STATUS_REG, 0);
if (bytes == 0 && (flag & SPI_XFER_END))
mmio_write_32(priv->mspi_hw + MSPI_SPCR2_REG, MSPI_SPE);
else
mmio_write_32(priv->mspi_hw + MSPI_SPCR2_REG,
MSPI_SPE | MSPI_CONT_AFTER_CMD);
/* Wait for completion */
retry = QSPI_RETRY_COUNT_US_MAX;
do {
if (mmio_read_32(priv->mspi_hw + MSPI_STATUS_REG) &
MSPI_CMD_COMPLETE_MASK)
break;
udelay(1);
} while (retry--);
if ((mmio_read_32(priv->mspi_hw + MSPI_STATUS_REG) &
MSPI_CMD_COMPLETE_MASK) == 0) {
ERROR("SPI: Completion timeout.\n");
return -1;
}
/* Read data out */
if (rx) {
if (priv->mspi_16bit) {
for (i = 0; i < chunk; i++) {
rx[i] = mmio_read_32(priv->mspi_hw +
MSPI_RXRAM_REG +
(i << 2))
& 0xff;
}
} else {
for (i = 0; i < chunk; i++) {
rx[i] = mmio_read_32(priv->mspi_hw +
MSPI_RXRAM_REG +
(((i << 1) + 1) << 2))
& 0xff;
}
}
rx += chunk;
}
}
return 0;
}
int iproc_qspi_xfer(uint32_t bitlen,
const void *dout, void *din, unsigned long flags)
{
struct bcmspi_priv *priv;
const uint8_t *tx = dout;
uint8_t *rx = din;
uint32_t bytes = bitlen / 8;
int ret = 0;
priv = &spi_cfg;
if (priv->state == QSPI_STATE_DISABLED) {
ERROR("QSPI: state disabled\n");
return -1;
}
/* we can only do 8 bit transfers */
if (bitlen % 8) {
ERROR("QSPI: Only support 8 bit transfers (requested %d)\n",
bitlen);
return -1;
}
/* MSPI: Enable write lock at the beginning */
if (flags & SPI_XFER_BEGIN) {
/* Switch to MSPI if not yet */
if (bcmspi_disable_bspi(priv) != 0) {
ERROR("QSPI: Switch to MSPI failed\n");
return -1;
}
mmio_write_32(priv->mspi_hw + MSPI_WRITE_LOCK_REG, 1);
}
/* MSPI: Transfer it */
if (bytes)
ret = mspi_xfer(priv, bytes, tx, rx, flags);
/* MSPI: Disable write lock if it's done */
if (flags & SPI_XFER_END)
mmio_write_32(priv->mspi_hw + MSPI_WRITE_LOCK_REG, 0);
return ret;
}
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef IPROC_QSPI_H
#define IPROC_QSPI_H
#include <platform_def.h>
/* SPI configuration */
#define IPROC_QSPI_CLK_SPEED 62500000
#define SPI_CPHA (1 << 0)
#define SPI_CPOL (1 << 1)
#define IPROC_QSPI_MODE0 0
#define IPROC_QSPI_MODE3 (SPI_CPOL|SPI_CPHA)
#define IPROC_QSPI_BUS 0
#define IPROC_QSPI_CS 0
#define IPROC_QSPI_BASE_REG QSPI_CTRL_BASE_ADDR
#define IPROC_QSPI_CRU_CONTROL_REG QSPI_CLK_CTRL
#define QSPI_AXI_CLK 200000000
#define QSPI_RETRY_COUNT_US_MAX 200000
/* Chip attributes */
#define QSPI_REG_BASE IPROC_QSPI_BASE_REG
#define CRU_CONTROL_REG IPROC_QSPI_CRU_CONTROL_REG
#define SPBR_DIV_MIN 8U
#define SPBR_DIV_MAX 255U
#define NUM_CDRAM_BYTES 16U
/* Register fields */
#define MSPI_SPCR0_MSB_BITS_8 0x00000020
/* Flash opcode and parameters */
#define CDRAM_PCS0 2
#define CDRAM_CONT (1 << 7)
#define CDRAM_BITS_EN (1 << 6)
#define CDRAM_QUAD_MODE (1 << 8)
#define CDRAM_RBIT_INPUT (1 << 10)
/* MSPI registers */
#define QSPI_MSPI_MODE_REG_BASE (QSPI_REG_BASE + 0x200)
#define MSPI_SPCR0_LSB_REG 0x000
#define MSPI_SPCR0_MSB_REG 0x004
#define MSPI_SPCR1_LSB_REG 0x008
#define MSPI_SPCR1_MSB_REG 0x00c
#define MSPI_NEWQP_REG 0x010
#define MSPI_ENDQP_REG 0x014
#define MSPI_SPCR2_REG 0x018
#define MSPI_STATUS_REG 0x020
#define MSPI_CPTQP_REG 0x024
#define MSPI_TXRAM_REG 0x040
#define MSPI_RXRAM_REG 0x0c0
#define MSPI_CDRAM_REG 0x140
#define MSPI_WRITE_LOCK_REG 0x180
#define MSPI_DISABLE_FLUSH_GEN_REG 0x184
#define MSPI_SPCR0_MSB_REG_MSTR_SHIFT 7
#define MSPI_SPCR0_MSB_REG_16_BITS_PER_WD_SHIFT (0 << 2)
#define MSPI_SPCR0_MSB_REG_MODE_MASK 0x3
/* BSPI registers */
#define QSPI_BSPI_MODE_REG_BASE QSPI_REG_BASE
#define BSPI_MAST_N_BOOT_CTRL_REG 0x008
#define BSPI_BUSY_STATUS_REG 0x00c
#define MSPI_CMD_COMPLETE_MASK 1
#define BSPI_BUSY_MASK 1
#define MSPI_CTRL_MASK 1
#define MSPI_SPE (1 << 6)
#define MSPI_CONT_AFTER_CMD (1 << 7)
/* State */
enum bcm_qspi_state {
QSPI_STATE_DISABLED,
QSPI_STATE_MSPI,
QSPI_STATE_BSPI
};
/* QSPI private data */
struct bcmspi_priv {
/* Specified SPI parameters */
uint32_t max_hz;
uint32_t spi_mode;
/* State */
enum bcm_qspi_state state;
int mspi_16bit;
/* Registers */
uintptr_t mspi_hw;
uintptr_t bspi_hw;
};
int iproc_qspi_setup(uint32_t bus, uint32_t cs,
uint32_t max_hz, uint32_t mode);
int iproc_qspi_claim_bus(void);
void iproc_qspi_release_bus(void);
int iproc_qspi_xfer(uint32_t bitlen, const void *dout,
void *din, unsigned long flags);
#endif /* IPROC_QSPI_H */
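A small standalone check of the SCK divisor arithmetic used in iproc_qspi_setup() with the constants defined above; the worked values are illustrative only and this is not additional driver code.

#include <assert.h>
#include <stdint.h>

/* Mirror of the SPBR computation in iproc_qspi_setup():
 * spbr = (QSPI_AXI_CLK - 1) / (2 * max_hz) + 1, clamped to [8, 255]. */
static uint32_t spbr_for(uint32_t axi_hz, uint32_t max_hz)
{
	uint32_t spbr = (axi_hz - 1U) / (2U * max_hz) + 1U;

	if (spbr < 8U)
		spbr = 8U;
	if (spbr > 255U)
		spbr = 255U;
	return spbr;
}

int main(void)
{
	/* At the default 62.5 MHz request the raw divisor is 2, but the
	 * SPBR_DIV_MIN clamp raises it to 8: SCK = 200 MHz / (2 * 8) = 12.5 MHz */
	assert(spbr_for(200000000U, 62500000U) == 8U);
	/* A 1 MHz request gives 100, which lies within the allowed range */
	assert(spbr_for(200000000U, 1000000U) == 100U);
	return 0;
}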
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <spi.h>
#include "iproc_qspi.h"
int spi_init(void)
{
return iproc_qspi_setup(IPROC_QSPI_BUS, IPROC_QSPI_CS,
IPROC_QSPI_CLK_SPEED, IPROC_QSPI_MODE0);
}
int spi_claim_bus(void)
{
return iproc_qspi_claim_bus();
}
void spi_release_bus(void)
{
iproc_qspi_release_bus();
}
int spi_xfer(uint32_t bitlen, const void *dout,
void *din, uint32_t flags)
{
return iproc_qspi_xfer(bitlen, dout, din, flags);
}
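For orientation, a minimal sketch of driving the wrappers above directly. It shows the two-phase SPI_XFER_BEGIN / SPI_XFER_END protocol that iproc_qspi_xfer() expects; the JEDEC READ ID opcode is used purely for illustration, and spi_init() is assumed to have been called beforehand.

#include <stdint.h>
#include <spi.h>

#define EXAMPLE_CMD_READ_ID	0x9FU	/* JEDEC READ ID opcode (illustrative) */

static int example_read_jedec_id(uint8_t id[3])
{
	uint8_t cmd = EXAMPLE_CMD_READ_ID;
	int ret;

	if (spi_claim_bus() != 0)
		return -1;

	/* Phase 1: send the command byte, keep CS asserted (no SPI_XFER_END) */
	ret = spi_xfer(8, &cmd, NULL, SPI_XFER_BEGIN);
	if (ret == 0)
		/* Phase 2: clock out three ID bytes and release CS */
		ret = spi_xfer(3 * 8, NULL, id, SPI_XFER_END);

	spi_release_bus();
	return ret;
}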
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <errno.h>
#include <sf.h>
#include <spi.h>
#define SPI_FLASH_CMD_LEN 4
#define QSPI_WAIT_TIMEOUT_US 200000U /* usec */
#define FINFO(jedec_id, ext_id, _sector_size, _n_sectors, _page_size, _flags) \
.id = { \
((jedec_id) >> 16) & 0xff, \
((jedec_id) >> 8) & 0xff, \
(jedec_id) & 0xff, \
((ext_id) >> 8) & 0xff, \
(ext_id) & 0xff, \
}, \
.id_len = (!(jedec_id) ? 0 : (3 + ((ext_id) ? 2 : 0))), \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = _page_size, \
.flags = (_flags),
/* SPI/QSPI flash device params structure */
const struct spi_flash_info spi_flash_ids[] = {
{"W25Q64CV", FINFO(0xef4017, 0x0, 64 * 1024, 128, 256, WR_QPP | SECT_4K)},
{"W25Q64DW", FINFO(0xef6017, 0x0, 64 * 1024, 128, 256, WR_QPP | SECT_4K)},
{"W25Q32", FINFO(0xef4016, 0x0, 64 * 1024, 64, 256, SECT_4K)},
{"MX25l3205D", FINFO(0xc22016, 0x0, 64 * 1024, 64, 256, SECT_4K)},
};
static void spi_flash_addr(uint32_t addr, uint8_t *cmd)
{
/*
* cmd[0] holds a SPI Flash command, stored earlier
* cmd[1/2/3] holds 24bit flash address
*/
cmd[1] = addr >> 16;
cmd[2] = addr >> 8;
cmd[3] = addr >> 0;
}
static const struct spi_flash_info *spi_flash_read_id(void)
{
const struct spi_flash_info *info;
uint8_t id[SPI_FLASH_MAX_ID_LEN];
int ret;
ret = spi_flash_cmd(CMD_READ_ID, id, SPI_FLASH_MAX_ID_LEN);
if (ret < 0) {
ERROR("SF: Error %d reading JEDEC ID\n", ret);
return NULL;
}
for (info = spi_flash_ids; info->name != NULL; info++) {
if (info->id_len) {
if (!memcmp(info->id, id, info->id_len))
return info;
}
}
printf("SF: unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
id[0], id[1], id[2]);
return NULL;
}
/* Enable writing on the SPI flash */
static inline int spi_flash_cmd_write_enable(struct spi_flash *flash)
{
return spi_flash_cmd(CMD_WRITE_ENABLE, NULL, 0);
}
static int spi_flash_cmd_wait(struct spi_flash *flash)
{
uint8_t cmd;
uint32_t i;
uint8_t status;
int ret;
i = 0;
while (1) {
cmd = CMD_RDSR;
ret = spi_flash_cmd_read(&cmd, 1, &status, 1);
if (ret < 0) {
ERROR("SF: cmd wait failed\n");
break;
}
if (!(status & STATUS_WIP))
break;
i++;
if (i >= QSPI_WAIT_TIMEOUT_US) {
ERROR("SF: cmd wait timeout\n");
ret = -1;
break;
}
udelay(1);
}
return ret;
}
static int spi_flash_write_common(struct spi_flash *flash, const uint8_t *cmd,
size_t cmd_len, const void *buf,
size_t buf_len)
{
int ret;
ret = spi_flash_cmd_write_enable(flash);
if (ret < 0) {
ERROR("SF: enabling write failed\n");
return ret;
}
ret = spi_flash_cmd_write(cmd, cmd_len, buf, buf_len);
if (ret < 0) {
ERROR("SF: write cmd failed\n");
return ret;
}
ret = spi_flash_cmd_wait(flash);
if (ret < 0) {
ERROR("SF: write timed out\n");
return ret;
}
return ret;
}
static int spi_flash_read_common(const uint8_t *cmd, size_t cmd_len,
void *data, size_t data_len)
{
int ret;
ret = spi_flash_cmd_read(cmd, cmd_len, data, data_len);
if (ret < 0) {
ERROR("SF: read cmd failed\n");
return ret;
}
return ret;
}
int spi_flash_read(struct spi_flash *flash, uint32_t offset,
uint32_t len, void *data)
{
uint32_t read_len = 0, read_addr;
uint8_t cmd[SPI_FLASH_CMD_LEN];
int ret;
ret = spi_claim_bus();
if (ret) {
ERROR("SF: unable to claim SPI bus\n");
return ret;
}
cmd[0] = CMD_READ_NORMAL;
while (len) {
read_addr = offset;
read_len = MIN(flash->page_size, len); /* bound this chunk by the remaining length */
spi_flash_addr(read_addr, cmd);
ret = spi_flash_read_common(cmd, sizeof(cmd), data, read_len);
if (ret < 0) {
ERROR("SF: read failed\n");
break;
}
offset += read_len;
len -= read_len;
data += read_len;
}
SPI_DEBUG("SF read done\n");
spi_release_bus();
return ret;
}
int spi_flash_write(struct spi_flash *flash, uint32_t offset,
uint32_t len, void *buf)
{
unsigned long byte_addr, page_size;
uint8_t cmd[SPI_FLASH_CMD_LEN];
uint32_t chunk_len, actual;
uint32_t write_addr;
int ret;
ret = spi_claim_bus();
if (ret) {
ERROR("SF: unable to claim SPI bus\n");
return ret;
}
page_size = flash->page_size;
cmd[0] = flash->write_cmd;
for (actual = 0; actual < len; actual += chunk_len) {
write_addr = offset;
byte_addr = offset % page_size;
chunk_len = MIN(len - actual,
(uint32_t)(page_size - byte_addr));
spi_flash_addr(write_addr, cmd);
SPI_DEBUG("SF:0x%p=>cmd:{0x%02x 0x%02x%02x%02x} chunk_len:%d\n",
buf + actual, cmd[0], cmd[1],
cmd[2], cmd[3], chunk_len);
ret = spi_flash_write_common(flash, cmd, sizeof(cmd),
buf + actual, chunk_len);
if (ret < 0) {
ERROR("SF: write cmd failed\n");
break;
}
offset += chunk_len;
}
SPI_DEBUG("SF write done\n");
spi_release_bus();
return ret;
}
int spi_flash_erase(struct spi_flash *flash, uint32_t offset, uint32_t len)
{
uint8_t cmd[SPI_FLASH_CMD_LEN];
uint32_t erase_size, erase_addr;
int ret;
erase_size = flash->erase_size;
if (offset % erase_size || len % erase_size) {
ERROR("SF: Erase offset/length not multiple of erase size\n");
return -1;
}
ret = spi_claim_bus();
if (ret) {
ERROR("SF: unable to claim SPI bus\n");
return ret;
}
cmd[0] = flash->erase_cmd;
while (len) {
erase_addr = offset;
spi_flash_addr(erase_addr, cmd);
SPI_DEBUG("SF: erase %2x %2x %2x %2x (%x)\n", cmd[0], cmd[1],
cmd[2], cmd[3], erase_addr);
ret = spi_flash_write_common(flash, cmd, sizeof(cmd), NULL, 0);
if (ret < 0) {
ERROR("SF: erase failed\n");
break;
}
offset += erase_size;
len -= erase_size;
}
SPI_DEBUG("sf erase done\n");
spi_release_bus();
return ret;
}
int spi_flash_probe(struct spi_flash *flash)
{
const struct spi_flash_info *info = NULL;
int ret;
ret = spi_claim_bus();
if (ret) {
ERROR("SF: Unable to claim SPI bus\n");
ERROR("SF: probe failed\n");
return ret;
}
info = spi_flash_read_id();
if (!info) {
ret = -1; /* unknown JEDEC ID: report probe failure */
goto probe_fail;
}
INFO("Flash Name: %s sectors %x, sec size %x\n",
info->name, info->n_sectors,
info->sector_size);
flash->size = info->n_sectors * info->sector_size;
flash->sector_size = info->sector_size;
flash->page_size = info->page_size;
flash->flags = info->flags;
flash->read_cmd = CMD_READ_NORMAL;
flash->write_cmd = CMD_PAGE_PROGRAM;
flash->erase_cmd = CMD_ERASE_64K;
flash->erase_size = ERASE_SIZE_64K;
probe_fail:
spi_release_bus();
return ret;
}
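A hedged usage sketch of the probe-then-read flow above; the offset and length are illustrative, and spi_init() is assumed to have run before this is called.

#include <stdint.h>
#include <string.h>
#include <sf.h>

static int example_read_first_page(uint8_t *buf, uint32_t len)
{
	struct spi_flash flash;

	memset(&flash, 0, sizeof(flash));

	/* Fills size, page_size and command opcodes from the recognized JEDEC ID */
	if (spi_flash_probe(&flash) != 0)
		return -1;

	/* Clamp to one page; offset 0 is illustrative only */
	if (len > flash.page_size)
		len = flash.page_size;

	return spi_flash_read(&flash, 0, len, buf);
}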
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/debug.h>
#include <spi.h>
#define BITS_PER_BYTE 8
#define CMD_LEN1 1
static int spi_flash_read_write(const uint8_t *cmd,
size_t cmd_len,
const uint8_t *data_out,
uint8_t *data_in,
size_t data_len)
{
unsigned long flags = SPI_XFER_BEGIN;
int ret;
if (data_len == 0)
flags |= SPI_XFER_END;
ret = spi_xfer(cmd_len * BITS_PER_BYTE, cmd, NULL, flags);
if (ret) {
ERROR("SF: Failed to send command (%zu bytes): %d\n",
cmd_len, ret);
} else if (data_len != 0) {
ret = spi_xfer(data_len * BITS_PER_BYTE, data_out,
data_in, SPI_XFER_END);
if (ret)
ERROR("SF: Failed to transfer %zu bytes of data: %d\n",
data_len, ret);
}
return ret;
}
int spi_flash_cmd_read(const uint8_t *cmd,
size_t cmd_len,
void *data,
size_t data_len)
{
return spi_flash_read_write(cmd, cmd_len, NULL, data, data_len);
}
int spi_flash_cmd(uint8_t cmd, void *response, size_t len)
{
return spi_flash_cmd_read(&cmd, CMD_LEN1, response, len);
}
int spi_flash_cmd_write(const uint8_t *cmd,
size_t cmd_len,
const void *data,
size_t data_len)
{
return spi_flash_read_write(cmd, cmd_len, data, NULL, data_len);
}
/*
 * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -46,6 +46,7 @@ typedef struct {
 */
typedef struct {
uintptr_t dev_spec;
uint16_t plat_toc_flag;
} fip_dev_state_t;
static const uuid_t uuid_null;
@@ -220,6 +221,11 @@ static int fip_dev_init(io_dev_info_t *dev_info, const uintptr_t init_params)
uintptr_t backend_handle;
fip_toc_header_t header;
size_t bytes_read;
fip_dev_state_t *state;
assert(dev_info != NULL);
state = (fip_dev_state_t *)dev_info->info;
/* Obtain a reference to the image by querying the platform layer */
result = plat_get_image_source(image_id, &backend_dev_handle,
@@ -248,6 +254,11 @@ static int fip_dev_init(io_dev_info_t *dev_info, const uintptr_t init_params)
result = -ENOENT;
} else {
VERBOSE("FIP header looks OK.\n");
/*
 * Store 16-bit Platform ToC flags field which occupies
 * bits [32-47] in fip header.
 */
state->plat_toc_flag = (header.flags >> 32) & 0xffff;
}
}
@@ -453,3 +464,17 @@ int register_io_dev_fip(const io_dev_connector_t **dev_con)
return result;
}
/* Function to retrieve plat_toc_flags, previously saved in FIP dev */
int fip_dev_get_plat_toc_flag(io_dev_info_t *dev_info, uint16_t *plat_toc_flag)
{
fip_dev_state_t *state;
assert(dev_info != NULL);
state = (fip_dev_state_t *)dev_info->info;
*plat_toc_flag = state->plat_toc_flag;
return 0;
}
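A hedged sketch of a possible platform-side consumer of the accessor added above. Where the prototype is exported from and how the FIP io_dev_info_t handle is obtained are platform-specific details, so both declarations below are assumptions rather than part of the patch.

#include <stdint.h>
#include <drivers/io/io_driver.h>

/* Assumed: the FIP driver exposes the accessor and the platform provides a
 * way to retrieve the registered FIP device handle. */
extern int fip_dev_get_plat_toc_flag(io_dev_info_t *dev_info,
				     uint16_t *plat_toc_flag);
extern io_dev_info_t *plat_get_fip_dev_info(void);

static uint16_t example_read_plat_toc_flags(void)
{
	uint16_t flags = 0U;

	/* Returns the 16-bit field captured from FIP header bits [32-47] */
	(void)fip_dev_get_plat_toc_flag(plat_get_fip_dev_info(), &flags);

	return flags;
}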
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SR_CHIMP_H
#define SR_CHIMP_H
#include <common/bl_common.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <platform_def.h>
#define CHIMP_WINDOW_SIZE 0x400000
#define CHIMP_ERROR_OFFSET 28
#define CHIMP_ERROR_MASK 0xf0000000
#ifndef EMULATION_SETUP
#define CHIMP_HANDSHAKE_TIMEOUT_MS 10000
#else
/*
 * 1 hr timeout for tests in the emulator.
 * This gives ChiMP a chance to boot fully from the QSPI
 * (on Palladium this takes up to 50 min, depending on the QSPI clock).
*/
#define CHIMP_HANDSHAKE_TIMEOUT_MS 3600000
#endif
#define CHIMP_BPE_MODE_ID_PATTERN (0x25000000)
#define CHIMP_BPE_MODE_ID_MASK (0x7f000000)
#define NIC_RESET_RELEASE_TIMEOUT_US (10)
/* written by M0, used by ChiMP ROM */
#define SR_IN_SMARTNIC_MODE_BIT 0
/* written by M0, used by ChiMP ROM */
#define SR_CHIMP_SECURE_BOOT_BIT 1
/* cleared by AP, set by ChiMP BC2 code */
#define SR_FLASH_ACCESS_DONE_BIT 2
#ifdef USE_CHIMP
void bcm_chimp_write(uintptr_t addr, uint32_t value);
uint32_t bcm_chimp_read(uintptr_t addr);
uint32_t bcm_chimp_read_ctrl(uint32_t offset);
void bcm_chimp_clrbits(uintptr_t addr, uint32_t bits);
void bcm_chimp_setbits(uintptr_t addr, uint32_t bits);
int bcm_chimp_is_nic_mode(void);
void bcm_chimp_fru_prog_done(bool status);
int bcm_chimp_handshake_done(void);
int bcm_chimp_wait_handshake(void);
/* Fastboot-related*/
int bcm_chimp_initiate_fastboot(int fastboot_type);
#else
static inline void bcm_chimp_write(uintptr_t addr, uint32_t value)
{
}
static inline uint32_t bcm_chimp_read(uintptr_t addr)
{
return 0;
}
static inline uint32_t bcm_chimp_read_ctrl(uint32_t offset)
{
return 0;
}
static inline void bcm_chimp_clrbits(uintptr_t addr, uint32_t bits)
{
}
static inline void bcm_chimp_setbits(uintptr_t addr, uint32_t bits)
{
}
static inline int bcm_chimp_is_nic_mode(void)
{
return 0;
}
static inline void bcm_chimp_fru_prog_done(bool status)
{
}
static inline int bcm_chimp_handshake_done(void)
{
return 0;
}
static inline int bcm_chimp_wait_handshake(void)
{
return 0;
}
static inline int bcm_chimp_initiate_fastboot(int fastboot_type)
{
return 0;
}
#endif /* USE_CHIMP */
#endif
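A brief, hedged sketch of a caller built against the interface above; it compiles with either the real implementations or the USE_CHIMP-disabled stubs. The fastboot type value and the treatment of a zero return from bcm_chimp_wait_handshake() as a timed-out handshake are assumptions made for illustration only.

#include <chimp.h>

static int example_nic_bringup(void)
{
	if (bcm_chimp_is_nic_mode()) {
		/* In NIC mode, wait for ChiMP's flash-access handshake */
		if (bcm_chimp_wait_handshake() == 0)
			return -1; /* assumed: zero means the handshake timed out */
	}

	/* 0 is an illustrative fastboot type; real values are platform-defined */
	return bcm_chimp_initiate_fastboot(0);
}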
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef BNXNVM_DEFS_H
#define BNXNVM_DEFS_H
#if defined(__GNUC__)
#define PACKED_STRUCT __packed
#else /* non-GCC compiler */
#ifndef DOS_DRIVERS
#pragma pack(push)
#pragma pack(1)
#endif
#define PACKED_STRUCT
#endif
typedef uint32_t u32_t;
typedef uint8_t u8_t;
typedef uint16_t u16_t;
#define BNXNVM_DEFAULT_BLOCK_SIZE 4096
#define BNXNVM_UNUSED_BYTE_VALUE 0xff
#define NV_MAX_BLOCK_SIZE 16384
#define BITS_PER_BYTE (8)
#define SIZEOF_IN_BITS(x) (sizeof(x)*BITS_PER_BYTE)
/************************/
/* byte-swapping macros */
/************************/
#define BYTE_SWAP_16(x) \
((((u16_t)(x) & 0xff00) >> 8) | \
(((u16_t)(x) & 0x00ff) << 8))
#define BYTE_SWAP_32(x) \
((((u32_t)(x) & 0xff000000) >> 24) | \
(((u32_t)(x) & 0x00ff0000) >> 8) | \
(((u32_t)(x) & 0x0000ff00) << 8) | \
(((u32_t)(x) & 0x000000ff) << 24))
/* auto-detect integer size */
#define BYTE_SWAP_INT(x) \
(SIZEOF_IN_BITS(x) == 16 ? BYTE_SWAP_16(x) : \
SIZEOF_IN_BITS(x) == 32 ? BYTE_SWAP_32(x) : (x))
/********************************/
/* Architecture-specific macros */
/********************************/
#ifdef __BIG_ENDIAN__ /* e.g. Motorola */
#define BE_INT16(x) (x)
#define BE_INT32(x) (x)
#define BE_INT(x) (x)
#define LE_INT16(x) BYTE_SWAP_16(x)
#define LE_INT32(x) BYTE_SWAP_32(x)
#define LE_INT(x) BYTE_SWAP_INT(x)
#else /* Little Endian (e.g. Intel) */
#define LE_INT16(x) (x)
#define LE_INT32(x) (x)
#define LE_INT(x) (x)
#define BE_INT16(x) BYTE_SWAP_16(x)
#define BE_INT32(x) BYTE_SWAP_32(x)
#define BE_INT(x) BYTE_SWAP_INT(x)
#endif
enum {
NV_OK = 0,
NV_NOT_NVRAM,
NV_BAD_MB,
NV_BAD_DIR_HEADER,
NV_BAD_DIR_ENTRY,
NV_FW_NOT_FOUND,
};
typedef struct {
#define BNXNVM_MASTER_BLOCK_SIG BE_INT32(0x424E5834) /*"BNX4"*/
/* Signature*/
u32_t sig;
/* Length of Master Block Header, in bytes [32] */
u32_t length;
/* Block size, in bytes [4096] */
u32_t block_size;
/* Byte-offset to Directory Block (translated) */
u32_t directory_offset;
/* Byte-offset to Block Redirection Table (non-translated) */
u32_t redirect_offset;
/* Size, in bytes of Reserved Blocks region (at end of NVRAM) */
u32_t reserved_size;
/*
* Size of NVRAM (in bytes) - may be used to
* override auto-detected size
*/
u32_t nvram_size;
/* CRC-32 (IEEE 802.3 compatible) of the above */
u32_t chksum;
} PACKED_STRUCT bnxnvm_master_block_header_t;
typedef struct {
#define BNXNVM_DIRECTORY_BLOCK_SIG BE_INT32(0x44697230) /* "Dir0" */
/* Signature */
u32_t sig;
/* Length of Directory Header, in bytes [16] */
u32_t length;
/* Number of Directory Entries */
u32_t entries;
/* Length of each Directory Entry, in bytes [24] */
u32_t entry_length;
} PACKED_STRUCT bnxnvm_directory_block_header_t;
typedef struct {
/* Directory Entry Type (see enum bnxnvm_directory_type) */
u16_t type;
/* Instance of this Directory Entry type (0-based) */
u16_t ordinal;
/*
* Directory Entry Extension flags used to identify
* secondary instances of a type:ordinal combinations
*/
u16_t ext;
/* Directory Entry Attribute flags used to describe the item contents */
u16_t attr;
/* Item location in NVRAM specified as offset (in bytes) */
u32_t item_location;
/*
* Length of NVRAM item in bytes
* (including padding - multiple of block size)
*/
u32_t item_length;
/* Length of item data in bytes (excluding padding) */
u32_t data_length;
/*
* CRC-32 (IEEE 802.3 compatible) of item data
* (excluding padding) (optional)
*/
u32_t data_chksum;
} PACKED_STRUCT bnxnvm_directory_entry_t;
enum bnxnvm_version_format {
/* US-ASCII string (not necessarily null-terminated) */
BNX_VERSION_FMT_ASCII = 0,
/* Each field 16-bits, displayed as unpadded decimal (e.g. "1.2.3.4") */
BNX_VERSION_FMT_DEC = 1,
/* A single hexadecimal value, up to 64-bits (no dots) */
BNX_VERSION_FMT_HEX = 2,
/* Multiple version values (three 8-bit version fields) */
BNX_VERSION_FMT_MULTI = 3
};
/* This structure definition must not change: */
typedef struct {
u16_t flags; /* bit-flags (defaults to 0x0000) */
u8_t version_format; /* enum bnxnvm_version_format */
u8_t version_length; /* in bytes */
u8_t version[16]; /* version value */
u16_t dir_type; /* enum bnxnvm_directory_type */
/* size of the entire trailer (to locate end of component data) */
u16_t trailer_length;
#define BNXNVM_COMPONENT_TRAILER_SIG BE_INT32(0x54726c72) /* "Trlr" */
u32_t sig;
u32_t chksum; /* CRC-32 of all bytes to this point */
} PACKED_STRUCT bnxnvm_component_trailer_base_t;
typedef struct {
/*
* new trailer members (e.g. digital signature)
* go here (insert at top):
*/
u8_t rsa_sig[256]; /* 2048-bit RSA-encrypted SHA-256 hash */
bnxnvm_component_trailer_base_t base;
} PACKED_STRUCT bnxnvm_component_trailer_t;
#define BNX_MAX_LEN_DIR_NAME 12
#define BNX_MAX_LEN_DIR_DESC 50
/*********************************************************
* NVRAM Directory Entry/Item Types, Names, and Descriptions
*
* If you see a name or description that needs improvement,
* please correct it or raise for discussion.
* When adding a new directory type, it would be appreciated
* if you also updated ../../libs/nvm/bnxt_nvm_str.c.
* DIR_NAME macros may contain up to 12 alpha-numeric
* US-ASCII characters only, camelCase is preferred for clarity.
* DIR_DESC macros may contain up to 50 US-ASCII characters
* providing a verbose description of the directory type.
*/
enum bnxnvm_directory_type {
/* 0x00 Unused directory entry, available for use */
BNX_DIR_TYPE_UNUSED = 0,
#define BNX_DIR_NAME_UNUSED "unused"
#define BNX_DIR_DESC_UNUSED "Deleted directory entry, available for reuse"
/* 0x01 Package installation log */
BNX_DIR_TYPE_PKG_LOG = 1,
#define BNX_DIR_NAME_PKG_LOG "pkgLog"
#define BNX_DIR_DESC_PKG_LOG "Package Installation Log"
BNX_DIR_TYPE_CHIMP_PATCH = 3,
#define BNX_DIR_NAME_CHIMP_PATCH "chimpPatch"
#define BNX_DIR_DESC_CHIMP_PATCH "ChiMP Patch Firmware"
/* 0x04 ChiMP firmware: Boot Code phase 1 */
BNX_DIR_TYPE_BOOTCODE = 4,
#define BNX_DIR_NAME_BOOTCODE "chimpBoot"
#define BNX_DIR_DESC_BOOTCODE "Chip Management Processor Boot Firmware"
/* 0x05 VPD data block */
BNX_DIR_TYPE_VPD = 5,
#define BNX_DIR_NAME_VPD "VPD"
#define BNX_DIR_DESC_VPD "Vital Product Data"
/* 0x06 Exp ROM MBA */
BNX_DIR_TYPE_EXP_ROM_MBA = 6,
#define BNX_DIR_NAME_EXP_ROM_MBA "MBA"
#define BNX_DIR_DESC_EXP_ROM_MBA "Multiple Boot Agent Expansion ROM"
BNX_DIR_TYPE_AVS = 7, /* 0x07 AVS FW */
#define BNX_DIR_NAME_AVS "AVS"
#define BNX_DIR_DESC_AVS "Adaptive Voltage Scaling Firmware"
BNX_DIR_TYPE_PCIE = 8, /* 0x08 PCIE FW */
#define BNX_DIR_NAME_PCIE "PCIEucode"
#define BNX_DIR_DESC_PCIE "PCIe Microcode"
BNX_DIR_TYPE_PORT_MACRO = 9, /* 0x09 PORT MACRO FW */
#define BNX_DIR_NAME_PORT_MACRO "portMacro"
#define BNX_DIR_DESC_PORT_MACRO "Port Macro Firmware"
BNX_DIR_TYPE_APE_FW = 10, /* 0x0A APE Firmware */
#define BNX_DIR_NAME_APE_FW "apeFW"
#define BNX_DIR_DESC_APE_FW "Application Processing Engine Firmware"
/* 0x0B Patch firmware executed by APE ROM */
BNX_DIR_TYPE_APE_PATCH = 11,
#define BNX_DIR_NAME_APE_PATCH "apePatch"
#define BNX_DIR_DESC_APE_PATCH "APE Patch Firmware"
BNX_DIR_TYPE_KONG_FW = 12, /* 0x0C Kong Firmware */
#define BNX_DIR_NAME_KONG_FW "kongFW"
#define BNX_DIR_DESC_KONG_FW "Kong Firmware"
/* 0x0D Patch firmware executed by Kong ROM */
BNX_DIR_TYPE_KONG_PATCH = 13,
#define BNX_DIR_NAME_KONG_PATCH "kongPatch"
#define BNX_DIR_DESC_KONG_PATCH "Kong Patch Firmware"
BNX_DIR_TYPE_BONO_FW = 14, /* 0x0E Bono Firmware */
#define BNX_DIR_NAME_BONO_FW "bonoFW"
#define BNX_DIR_DESC_BONO_FW "Bono Firmware"
/* 0x0F Patch firmware executed by Bono ROM */
BNX_DIR_TYPE_BONO_PATCH = 15,
#define BNX_DIR_NAME_BONO_PATCH "bonoPatch"
#define BNX_DIR_DESC_BONO_PATCH "Bono Patch Firmware"
BNX_DIR_TYPE_TANG_FW = 16, /* 0x10 Tang firmware */
#define BNX_DIR_NAME_TANG_FW "tangFW"
#define BNX_DIR_DESC_TANG_FW "Tang Firmware"
/* 0x11 Patch firmware executed by Tang ROM */
BNX_DIR_TYPE_TANG_PATCH = 17,
#define BNX_DIR_NAME_TANG_PATCH "tangPatch"
#define BNX_DIR_DESC_TANG_PATCH "Tang Patch Firmware"
/* 0x12 ChiMP firmware: Boot Code phase 2 (loaded by phase 1) */
BNX_DIR_TYPE_BOOTCODE_2 = 18,
#define BNX_DIR_NAME_BOOTCODE_2 "chimpHWRM"
#define BNX_DIR_DESC_BOOTCODE_2 "ChiMP Hardware Resource Manager Firmware"
BNX_DIR_TYPE_CCM = 19, /* 0x13 CCM ROM binary */
#define BNX_DIR_NAME_CCM "CCM"
#define BNX_DIR_DESC_CCM "Comprehensive Configuration Management"
/* 0x14 PCI-IDs, PCI-related configuration properties */
BNX_DIR_TYPE_PCI_CFG = 20,
#define BNX_DIR_NAME_PCI_CFG "pciCFG"
#define BNX_DIR_DESC_PCI_CFG "PCIe Configuration Data"
BNX_DIR_TYPE_TSCF_UCODE = 21, /* 0x15 TSCF micro-code */
#define BNX_DIR_NAME_TSCF_UCODE "PHYucode"
#define BNX_DIR_DESC_TSCF_UCODE "Falcon PHY Microcode"
BNX_DIR_TYPE_ISCSI_BOOT = 22, /* 0x16 iSCSI Boot */
#define BNX_DIR_NAME_ISCSI_BOOT "iSCSIboot"
#define BNX_DIR_DESC_ISCSI_BOOT "iSCSI Boot Software Initiator"
/* 0x18 iSCSI Boot IPV6 - ***DEPRECATED*** */
BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
/* 0x19 iSCSI Boot IPV4N6 - ***DEPRECATED*** */
BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
BNX_DIR_TYPE_ISCSI_BOOT_CFG = 26, /* 0x1a iSCSI Boot CFG v6 */
#define BNX_DIR_NAME_ISCSI_BOOT_CFG "iSCSIcfg"
#define BNX_DIR_DESC_ISCSI_BOOT_CFG "iSCSI Boot Configuration Data"
BNX_DIR_TYPE_EXT_PHY = 27, /* 0x1b External PHY FW */
#define BNX_DIR_NAME_EXT_PHY "extPHYfw"
#define BNX_DIR_DESC_EXT_PHY "External PHY Firmware"
BNX_DIR_TYPE_MODULES_PN = 28, /* 0x1c Modules PartNum list */
#define BNX_DIR_NAME_MODULES_PN "modPartNums"
#define BNX_DIR_DESC_MODULES_PN "Optical Modules Part Number List"
BNX_DIR_TYPE_SHARED_CFG = 40, /* 0x28 shared configuration block */
#define BNX_DIR_NAME_SHARED_CFG "sharedCFG"
#define BNX_DIR_DESC_SHARED_CFG "Shared Configuration Data"
BNX_DIR_TYPE_PORT_CFG = 41, /* 0x29 port configuration block */
#define BNX_DIR_NAME_PORT_CFG "portCFG"
#define BNX_DIR_DESC_PORT_CFG "Port Configuration Data"
BNX_DIR_TYPE_FUNC_CFG = 42, /* 0x2A func configuration block */
#define BNX_DIR_NAME_FUNC_CFG "funcCFG"
#define BNX_DIR_DESC_FUNC_CFG "Function Configuration Data"
/* Management Firmware (TruManage) related dir entries*/
/* 0x30 Management firmware configuration (see BMCFG library)*/
BNX_DIR_TYPE_MGMT_CFG = 48,
#define BNX_DIR_NAME_MGMT_CFG "mgmtCFG"
#define BNX_DIR_DESC_MGMT_CFG "Out-of-band Management Configuration Data"
BNX_DIR_TYPE_MGMT_DATA = 49, /* 0x31 "Opaque Management Data" */
#define BNX_DIR_NAME_MGMT_DATA "mgmtData"
#define BNX_DIR_DESC_MGMT_DATA "Out-of-band Management Data"
BNX_DIR_TYPE_MGMT_WEB_DATA = 50, /* 0x32 "Web GUI" file data */
#define BNX_DIR_NAME_MGMT_WEB_DATA "webData"
#define BNX_DIR_DESC_MGMT_WEB_DATA "Out-of-band Management Web Data"
/* 0x33 "Web GUI" file metadata */
BNX_DIR_TYPE_MGMT_WEB_META = 51,
#define BNX_DIR_NAME_MGMT_WEB_META "webMeta"
#define BNX_DIR_DESC_MGMT_WEB_META "Out-of-band Management Web Metadata"
/* 0x34 Management firmware Event Log (a.k.a. "SEL") */
BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
#define BNX_DIR_NAME_MGMT_EVENT_LOG "eventLog"
#define BNX_DIR_DESC_MGMT_EVENT_LOG "Out-of-band Management Event Log"
/* 0x35 Management firmware Audit Log */
BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
#define BNX_DIR_NAME_MGMT_AUDIT_LOG "auditLog"
#define BNX_DIR_DESC_MGMT_AUDIT_LOG "Out-of-band Management Audit Log"
};
/* For backwards compatibility only, may be removed later */
#define BNX_DIR_TYPE_ISCSI_BOOT_CFG6 BNX_DIR_TYPE_ISCSI_BOOT_CFG
/* Firmware NVM items of "APE BIN" format are identified with
* the following macro:
*/
#define BNX_DIR_TYPE_IS_APE_BIN_FMT(type)\
((type) == BNX_DIR_TYPE_CHIMP_PATCH \
|| (type) == BNX_DIR_TYPE_BOOTCODE \
|| (type) == BNX_DIR_TYPE_BOOTCODE_2 \
|| (type) == BNX_DIR_TYPE_APE_FW \
|| (type) == BNX_DIR_TYPE_APE_PATCH \
|| (type) == BNX_DIR_TYPE_TANG_FW \
|| (type) == BNX_DIR_TYPE_TANG_PATCH \
|| (type) == BNX_DIR_TYPE_KONG_FW \
|| (type) == BNX_DIR_TYPE_KONG_PATCH \
|| (type) == BNX_DIR_TYPE_BONO_FW \
|| (type) == BNX_DIR_TYPE_BONO_PATCH \
)
/* Other (non APE BIN) executable NVM items are identified with
* the following macro:
*/
#define BNX_DIR_TYPE_IS_OTHER_EXEC(type)\
((type) == BNX_DIR_TYPE_AVS \
|| (type) == BNX_DIR_TYPE_EXP_ROM_MBA \
|| (type) == BNX_DIR_TYPE_PCIE \
|| (type) == BNX_DIR_TYPE_TSCF_UCODE \
|| (type) == BNX_DIR_TYPE_EXT_PHY \
|| (type) == BNX_DIR_TYPE_CCM \
|| (type) == BNX_DIR_TYPE_ISCSI_BOOT \
)
/* Executable NVM items (e.g. microcode, firmware, software) identified
* with the following macro
*/
#define BNX_DIR_TYPE_IS_EXECUTABLE(type) \
(BNX_DIR_TYPE_IS_APE_BIN_FMT(type) \
|| BNX_DIR_TYPE_IS_OTHER_EXEC(type))
#define BNX_DIR_ORDINAL_FIRST 0 /* Ordinals are 0-based */
/* No extension flags for this directory entry */
#define BNX_DIR_EXT_NONE 0
/* Directory entry is inactive (not used, not hidden,
* not available for reuse)
*/
#define BNX_DIR_EXT_INACTIVE (1 << 0)
/* Directory content is a temporary staging location for
* updating the primary (non-update) directory entry contents
* (e.g. performing a secure firmware update)
*/
#define BNX_DIR_EXT_UPDATE (1 << 1)
/* No attribute flags set for this directory entry */
#define BNX_DIR_ATTR_NONE 0
/* Directory entry checksum of contents is purposely incorrect */
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
/* Directory contents are in the form of a property-stream
* (e.g. configuration properties)
*/
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
/* Directory content (e.g. iSCSI boot) supports IPv4 */
#define BNX_DIR_ATTR_IPv4 (1 << 2)
/* Directory content (e.g. iSCSI boot) supports IPv6 */
#define BNX_DIR_ATTR_IPv6 (1 << 3)
/* Directory content includes standard NVM component trailer
* (bnxnvm_component_trailer_t)
*/
#define BNX_DIR_ATTR_TRAILER (1 << 4)
/* Index of tab-delimited fields in each package log
* (BNX_DIR_TYPE_PKG_LOG) record (\n-terminated line):
*/
enum bnxnvm_pkglog_field_index {
/* Package installation date/time in ISO-8601 format */
BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
/* Installed package description (from package header) or "N/A" */
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
/* Installed package version string (from package header) or "N/A" */
BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
/* Installed package creation/modification timestamp (ISO-8601) */
BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
/* Installed package checksum in hexadecimal (CRC-32) or "N/A" */
BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
/* Total number of packaged items applied in this installation */
BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
/* Hexadecimal bit-mask identifying which items were installed */
BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
};
#if !defined(__GNUC__)
#ifndef DOS_DRIVERS
#pragma pack(pop) /* original packing */
#endif
#endif
#endif /* Don't add anything after this line */
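Separate from the header above, here is a hedged sketch of how the master block and directory structures could be walked to locate an NVRAM item by type. example_nv_read() is an assumed byte-accurate NVRAM reader (e.g. backed by the SPI-NOR driver), and the directory entries are assumed to follow the directory header contiguously; both are assumptions, not facts taken from the header.

#include <stdint.h>
#include <chimp_nv_defs.h>

/* Assumed reader: copy 'len' bytes from NVRAM byte offset 'off' into 'buf' */
extern int example_nv_read(uint32_t off, void *buf, uint32_t len);

static int example_find_item(u16_t wanted_type, bnxnvm_directory_entry_t *out)
{
	bnxnvm_master_block_header_t mb;
	bnxnvm_directory_block_header_t dir;
	u32_t i;

	if (example_nv_read(0, &mb, sizeof(mb)) != 0 ||
	    mb.sig != BNXNVM_MASTER_BLOCK_SIG)
		return NV_NOT_NVRAM;

	if (example_nv_read(mb.directory_offset, &dir, sizeof(dir)) != 0 ||
	    dir.sig != BNXNVM_DIRECTORY_BLOCK_SIG)
		return NV_BAD_DIR_HEADER;

	for (i = 0; i < dir.entries; i++) {
		u32_t off = mb.directory_offset + dir.length +
			    i * dir.entry_length;

		if (example_nv_read(off, out, sizeof(*out)) != 0)
			return NV_BAD_DIR_ENTRY;
		if (out->type == wanted_type)
			return NV_OK;
	}

	return NV_FW_NOT_FOUND;
}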
/*
* Copyright (c) 2015 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef DMU_H
#define DMU_H
/* Clock field should be 2 bits only */
#define CLKCONFIG_MASK 0x3
/* argument */
struct DmuBlockEnable {
uint32_t sotp:1;
uint32_t pka_rng:1;
uint32_t crypto:1;
uint32_t spl:1;
uint32_t cdru_vgm:1;
uint32_t apbs_s0_idm:1;
uint32_t smau_s0_idm:1;
};
/* prototype */
uint32_t bcm_dmu_block_enable(struct DmuBlockEnable dbe);
uint32_t bcm_dmu_block_disable(struct DmuBlockEnable dbe);
uint32_t bcm_set_ihost_pll_freq(uint32_t cluster_num, int ihost_pll_freq_sel);
uint32_t bcm_get_ihost_pll_freq(uint32_t cluster_num);
#define PLL_FREQ_BYPASS 0x0
#define PLL_FREQ_FULL 0x1
#define PLL_FREQ_HALF 0x2
#define PLL_FREQ_QRTR 0x3
#endif
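A minimal usage sketch for the PLL frequency selectors declared above; the cluster index is illustrative and the check is only a sanity print.

#include <stdint.h>
#include <common/debug.h>
#include <dmu.h>

static void example_set_half_rate(void)
{
	uint32_t cluster = 0U; /* illustrative cluster index */

	bcm_set_ihost_pll_freq(cluster, PLL_FREQ_HALF);

	if (bcm_get_ihost_pll_freq(cluster) != PLL_FREQ_HALF)
		WARN("ihost PLL frequency selection not applied\n");
}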
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef EMMC_H
#define EMMC_H
#include <stdint.h>
#include <common/debug.h>
#include <platform_def.h>
#include "emmc_chal_types.h"
#include "emmc_chal_sd.h"
#include "emmc_csl_sdprot.h"
#include "emmc_csl_sdcmd.h"
#include "emmc_pboot_hal_memory_drv.h"
/* ------------------------------------------------------------------- */
#define EXT_CSD_SIZE 512
#ifdef PLAT_SD_MAX_READ_LENGTH
#define SD_MAX_READ_LENGTH PLAT_SD_MAX_READ_LENGTH
#ifdef USE_EMMC_LARGE_BLK_TRANSFER_LENGTH
#define SD_MAX_BLK_TRANSFER_LENGTH 0x10000000
#else
#define SD_MAX_BLK_TRANSFER_LENGTH 0x1000
#endif
#else
#define SD_MAX_READ_LENGTH EMMC_BLOCK_SIZE
#define SD_MAX_BLK_TRANSFER_LENGTH EMMC_BLOCK_SIZE
#endif
struct emmc_global_buffer {
union {
uint8_t Ext_CSD_storage[EXT_CSD_SIZE];
uint8_t tempbuf[SD_MAX_READ_LENGTH];
} u;
};
struct emmc_global_vars {
struct sd_card_data cardData;
struct sd_handle sdHandle;
struct sd_dev sdDevice;
struct sd_card_info sdCard;
unsigned int init_done;
};
#define ICFG_SDIO0_CAP0__SLOT_TYPE_R 27
#define ICFG_SDIO0_CAP0__INT_MODE_R 26
#define ICFG_SDIO0_CAP0__SYS_BUS_64BIT_R 25
#define ICFG_SDIO0_CAP0__VOLTAGE_1P8V_R 24
#define ICFG_SDIO0_CAP0__VOLTAGE_3P0V_R 23
#define ICFG_SDIO0_CAP0__VOLTAGE_3P3V_R 22
#define ICFG_SDIO0_CAP0__SUSPEND_RESUME_R 21
#define ICFG_SDIO0_CAP0__SDMA_R 20
#define ICFG_SDIO0_CAP0__HIGH_SPEED_R 19
#define ICFG_SDIO0_CAP0__ADMA2_R 18
#define ICFG_SDIO0_CAP0__EXTENDED_MEDIA_R 17
#define ICFG_SDIO0_CAP0__MAX_BLOCK_LEN_R 15
#define ICFG_SDIO0_CAP0__BASE_CLK_FREQ_R 7
#define ICFG_SDIO0_CAP0__TIMEOUT_UNIT_R 6
#define ICFG_SDIO0_CAP0__TIMEOUT_CLK_FREQ_R 0
#define ICFG_SDIO0_CAP1__SPI_BLOCK_MODE_R 22
#define ICFG_SDIO0_CAP1__SPI_MODE_R 21
#define ICFG_SDIO0_CAP1__CLK_MULT_R 13
#define ICFG_SDIO0_CAP1__RETUNING_MODE_R 11
#define ICFG_SDIO0_CAP1__TUNE_SDR50_R 10
#define ICFG_SDIO0_CAP1__TIME_RETUNE_R 6
#define ICFG_SDIO0_CAP1__DRIVER_D_R 5
#define ICFG_SDIO0_CAP1__DRIVER_C_R 4
#define ICFG_SDIO0_CAP1__DRIVER_A_R 3
#define ICFG_SDIO0_CAP1__DDR50_R 2
#define ICFG_SDIO0_CAP1__SDR104_R 1
#define ICFG_SDIO0_CAP1__SDR50_R 0
#define SDIO0_CTRL_REGS_BASE_ADDR (SDIO0_EMMCSDXC_SYSADDR)
#define SDIO0_IDM_RESET_CTRL_ADDR (SDIO_IDM0_IDM_RESET_CONTROL)
#define EMMC_CTRL_REGS_BASE_ADDR SDIO0_CTRL_REGS_BASE_ADDR
#define EMMC_IDM_RESET_CTRL_ADDR SDIO0_IDM_RESET_CTRL_ADDR
#define EMMC_IDM_IO_CTRL_DIRECT_ADDR SDIO_IDM0_IO_CONTROL_DIRECT
extern struct emmc_global_buffer *emmc_global_buf_ptr;
extern struct emmc_global_vars *emmc_global_vars_ptr;
#define EMMC_CARD_DETECT_TIMEOUT_MS 1200
#define EMMC_CMD_TIMEOUT_MS 200
#define EMMC_BUSY_CMD_TIMEOUT_MS 200
#define EMMC_CLOCK_SETTING_TIMEOUT_MS 100
#define EMMC_WFE_RETRY 40000
#define EMMC_WFE_RETRY_DELAY_US 10
#ifdef EMMC_DEBUG
#define EMMC_TRACE INFO
#else
#define EMMC_TRACE(...)
#endif
#endif /* EMMC_H */