Commit 0a910952 authored by Soby Mathew's avatar Soby Mathew Committed by TrustedFirmware Code Review
Browse files

Merge changes from topic "ld/mtd_framework" into integration

* changes:
  doc: stm32mp1: Update build command line
  fdts: stm32mp1: remove second QSPI flash instance
  stm32mp1: Add support for SPI-NOR boot device
  stm32mp1: Add support for SPI-NAND boot device
  spi: stm32_qspi: Add QSPI support
  fdts: stm32mp1: update for FMC2 pin muxing
  stm32mp1: Add support for raw NAND boot device
  fmc: stm32_fmc2_nand: Add FMC2 driver support
  stm32mp1: Reduce MAX_XLAT_TABLES to 4
  io: stm32image: fix device_size type
  stm32mp: add DT helper for reg by name
  stm32mp1: add compilation flags for boot devices
  lib: utils_def: add CLAMP macro
  compiler_rt: Import popcountdi2.c and popcountsi2.c files
  Add SPI-NOR framework
  Add SPI-NAND framework
  Add SPI-MEM framework
  Add raw NAND framework
parents 45cc606e ac7764bb
......@@ -76,21 +76,34 @@ ROM code -> BL2 (compiled with BL2_AT_EL3) -> OP-TEE -> BL33 (U-Boot)
Build Instructions
------------------
Boot media(s) supported by BL2 must be specified in the build command.
Available storage media are:
- ``STM32MP_SDMMC``
- ``STM32MP_EMMC``
- ``STM32MP_RAW_NAND``
- ``STM32MP_SPI_NAND``
- ``STM32MP_SPI_NOR``
To build with SP_min:
To build with SP_min and support for all bootable devices:
.. code:: bash
make CROSS_COMPILE=arm-linux-gnueabihf- PLAT=stm32mp1 ARCH=aarch32 ARM_ARCH_MAJOR=7 AARCH32_SP=sp_min DTB_FILE_NAME=stm32mp157c-ev1.dtb
make CROSS_COMPILE=arm-linux-gnueabihf- PLAT=stm32mp1 ARCH=aarch32 ARM_ARCH_MAJOR=7 AARCH32_SP=sp_min STM32MP_SDMMC=1 STM32MP_EMMC=1 STM32MP_RAW_NAND=1 STM32MP_SPI_NAND=1
STM32MP_SPI_NOR=1 DTB_FILE_NAME=stm32mp157c-ev1.dtb
cd <u-boot_directory>
make stm32mp15_trusted_defconfig
make DEVICE_TREE=stm32mp157c-ev1 all
To build TF-A with OP-TEE support:
To build TF-A with OP-TEE support for all bootable devices:
.. code:: bash
make CROSS_COMPILE=arm-linux-gnueabihf- PLAT=stm32mp1 ARCH=aarch32 ARM_ARCH_MAJOR=7 AARCH32_SP=optee
make CROSS_COMPILE=arm-linux-gnueabihf- PLAT=stm32mp1 ARCH=aarch32 ARM_ARCH_MAJOR=7 AARCH32_SP=optee STM32MP_SDMMC=1 STM32MP_EMMC=1 STM32MP_RAW_NAND=1 STM32MP_SPI_NAND=1 STM32MP_SPI_NOR=1 DTB_FILE_NAME=stm32mp157c-ev1.dtb
cd <optee_directory>
make CROSS_COMPILE=arm-linux-gnueabihf- ARCH=arm PLATFORM=stm32mp1 CFG_EMBED_DTB_SOURCE_FILE=stm32mp157c-ev1.dts
cd <u-boot_directory>
make stm32mp15_optee_defconfig
make DEVICE_TREE=stm32mp157c-ev1 all
The following build options are supported:
......
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <platform_def.h>
#include <common/debug.h>
#include <drivers/io/io_driver.h>
#include <drivers/io/io_mtd.h>
#include <lib/utils.h>
/* Per-device runtime state for one open MTD (NAND/NOR) device. */
typedef struct {
	io_mtd_dev_spec_t *dev_spec;	/* Also used as unique pool-slot identifier */
	uintptr_t base;
	unsigned long long offset;	/* Offset in bytes */
	unsigned long long size;	/* Size of device in bytes */
} mtd_dev_state_t;
/* Device type identifier callback (non-static: also usable by platform code) */
io_type_t device_type_mtd(void);

/* IO entity hooks */
static int mtd_open(io_dev_info_t *dev_info, const uintptr_t spec,
		    io_entity_t *entity);
static int mtd_seek(io_entity_t *entity, int mode, signed long long offset);
static int mtd_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		    size_t *length_read);
static int mtd_close(io_entity_t *entity);

/* IO device hooks */
static int mtd_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info);
static int mtd_dev_close(io_dev_info_t *dev_info);

/* Connector handed back to callers of register_io_dev_mtd() */
static const io_dev_connector_t mtd_dev_connector = {
	.dev_open = mtd_dev_open
};

/* Operation table shared by all MTD device info entries */
static const io_dev_funcs_t mtd_dev_funcs = {
	.type = device_type_mtd,
	.open = mtd_open,
	.seek = mtd_seek,
	.read = mtd_read,
	.close = mtd_close,
	.dev_close = mtd_dev_close,
};

/* Static pools backing up to MAX_IO_MTD_DEVICES simultaneously open devices */
static mtd_dev_state_t state_pool[MAX_IO_MTD_DEVICES];
static io_dev_info_t dev_info_pool[MAX_IO_MTD_DEVICES];
/* Identify this driver as an MTD-type device to the IO framework. */
io_type_t device_type_mtd(void)
{
	return IO_TYPE_MTD;
}
/*
 * Locate the state pool entry whose dev_spec matches @dev_spec.
 *
 * Passing NULL finds the first free slot. On success the slot index is
 * stored in @index_out and 0 is returned; -ENOENT when nothing matches.
 * @index_out is left untouched on failure.
 */
static int find_first_mtd_state(const io_mtd_dev_spec_t *dev_spec,
				unsigned int *index_out)
{
	unsigned int idx;

	for (idx = 0U; idx < MAX_IO_MTD_DEVICES; idx++) {
		/* dev_spec is used as identifier since it's unique */
		if (state_pool[idx].dev_spec == dev_spec) {
			*index_out = idx;
			return 0;
		}
	}

	return -ENOENT;
}
/*
 * Grab a free entry from the device info pool (a free slot has a NULL
 * dev_spec). Stores the entry in @dev_info and returns 0, or -ENOMEM
 * when the pool is exhausted.
 */
static int allocate_dev_info(io_dev_info_t **dev_info)
{
	unsigned int idx = 0U;

	if (find_first_mtd_state(NULL, &idx) != 0) {
		return -ENOMEM;
	}

	dev_info_pool[idx].funcs = &mtd_dev_funcs;
	dev_info_pool[idx].info = (uintptr_t)&state_pool[idx];
	*dev_info = &dev_info_pool[idx];

	return 0;
}
/* Return a device info entry to the pool, wiping both state and info. */
static int free_dev_info(io_dev_info_t *dev_info)
{
	mtd_dev_state_t *state = (mtd_dev_state_t *)dev_info->info;
	unsigned int idx = 0U;
	int ret;

	ret = find_first_mtd_state(state->dev_spec, &idx);
	if (ret != 0) {
		return ret;
	}

	zeromem(state, sizeof(mtd_dev_state_t));
	zeromem(dev_info, sizeof(io_dev_info_t));

	return 0;
}
/* Open an entity on the MTD device and reset the read offset to zero. */
static int mtd_open(io_dev_info_t *dev_info, const uintptr_t spec,
		    io_entity_t *entity)
{
	mtd_dev_state_t *state;

	assert((dev_info->info != 0UL) && (entity->info == 0UL));

	state = (mtd_dev_state_t *)dev_info->info;
	state->offset = 0U;
	entity->info = (uintptr_t)state;

	return 0;
}
/*
 * Seek to a specific position using offset.
 *
 * Only IO_SEEK_SET and IO_SEEK_CUR are supported. @offset must be
 * non-negative (asserted) and the resulting position must stay strictly
 * below the device size.
 */
static int mtd_seek(io_entity_t *entity, int mode, signed long long offset)
{
	mtd_dev_state_t *cur;

	assert((entity->info != (uintptr_t)NULL) && (offset >= 0));

	cur = (mtd_dev_state_t *)entity->info;

	switch (mode) {
	case IO_SEEK_SET:
		/* Reject positions at or past the end of the device */
		if ((offset >= 0) &&
		    ((unsigned long long)offset >= cur->size)) {
			return -EINVAL;
		}

		cur->offset = offset;
		break;
	case IO_SEEK_CUR:
		/* Second comparison catches unsigned wrap of offset + cur->offset */
		if (((cur->offset + (unsigned long long)offset) >=
		     cur->size) ||
		    ((cur->offset + (unsigned long long)offset) <
		     cur->offset)) {
			return -EINVAL;
		}

		cur->offset += (unsigned long long)offset;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Read @length bytes from the current seek position into @buffer.
 *
 * The request must fit entirely within the device. The backend read
 * callback is expected to deliver the full length (asserted). The seek
 * position advances by the number of bytes read.
 */
static int mtd_read(io_entity_t *entity, uintptr_t buffer, size_t length,
		    size_t *out_length)
{
	mtd_dev_state_t *cur;
	io_mtd_ops_t *ops;
	int ret;

	assert(entity->info != (uintptr_t)NULL);
	assert((length > 0U) && (buffer != (uintptr_t)NULL));

	cur = (mtd_dev_state_t *)entity->info;
	ops = &cur->dev_spec->ops;
	assert(ops->read != NULL);

	VERBOSE("Read at %llx into %lx, length %zi\n",
		cur->offset, buffer, length);
	/*
	 * NOTE(review): cur->offset + length could wrap for extreme values;
	 * callers are trusted not to pass lengths near ULLONG_MAX.
	 */
	if ((cur->offset + length) > cur->dev_spec->device_size) {
		return -EINVAL;
	}

	ret = ops->read(cur->offset, buffer, length, out_length);
	if (ret < 0) {
		return ret;
	}

	assert(*out_length == length);
	cur->offset += *out_length;

	return 0;
}
/* Close an entity: detach it from the device state. */
static int mtd_close(io_entity_t *entity)
{
	entity->info = (uintptr_t)NULL;
	return 0;
}
/*
 * Open the MTD device described by @dev_spec.
 *
 * Allocates a state/info pair from the pools and, when the backend
 * provides an init hook, probes the device to fill in its size and
 * erase size. On probe failure the cached size is forced to 0 and the
 * probe error is returned.
 */
static int mtd_dev_open(const uintptr_t dev_spec, io_dev_info_t **dev_info)
{
	mtd_dev_state_t *cur;
	io_dev_info_t *info;
	io_mtd_ops_t *ops;
	int result;

	result = allocate_dev_info(&info);
	if (result != 0) {
		return -ENOENT;
	}

	cur = (mtd_dev_state_t *)info->info;
	cur->dev_spec = (io_mtd_dev_spec_t *)dev_spec;
	*dev_info = info;
	ops = &(cur->dev_spec->ops);

	/* result is still 0 here, so a missing init hook counts as success */
	if (ops->init != NULL) {
		result = ops->init(&cur->dev_spec->device_size,
				   &cur->dev_spec->erase_size);
	}

	if (result == 0) {
		cur->size = cur->dev_spec->device_size;
	} else {
		cur->size = 0ULL;
	}

	return result;
}
/* Close the device: return its state/info pair to the pools. */
static int mtd_dev_close(io_dev_info_t *dev_info)
{
	return free_dev_info(dev_info);
}
/* Exported functions */

/*
 * Register the MTD driver in the IO abstraction.
 *
 * On success *dev_con is set to the connector to pass to io_dev_open().
 * Note that only the first pool entry is registered with the IO
 * framework here.
 */
int register_io_dev_mtd(const io_dev_connector_t **dev_con)
{
	int result;

	result = io_register_device(&dev_info_pool[0]);
	if (result == 0) {
		*dev_con = &mtd_dev_connector;
	}

	return result;
}
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <platform_def.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/nand.h>
#include <lib/utils.h>
/*
 * Define a single nand_device used by specific NAND frameworks.
 */
static struct nand_device nand_dev;

/* Bounce buffer for partial-page reads; must hold at least one full page */
static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];
/*
 * Read @length bytes at byte @offset from the NAND into @buffer,
 * skipping blocks flagged bad by the low-level driver.
 *
 * Page-unaligned head/tail portions go through scratch_buff; aligned
 * full pages are read directly into @buffer. *length_read returns the
 * number of bytes copied out.
 *
 * Returns 0 on success or a negative errno value.
 */
int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
	      size_t *length_read)
{
	unsigned int block = offset / nand_dev.block_size;
	unsigned int end_block = (offset + length - 1U) / nand_dev.block_size;
	unsigned int page_start =
		(offset % nand_dev.block_size) / nand_dev.page_size;
	unsigned int nb_pages = nand_dev.block_size / nand_dev.page_size;
	unsigned int start_offset = offset % nand_dev.page_size;
	unsigned int page;
	unsigned int bytes_read;
	int is_bad;
	int ret;

	VERBOSE("Block %u - %u, page_start %u, nb %u, length %zu, offset %u\n",
		block, end_block, page_start, nb_pages, length, offset);

	*length_read = 0UL;

	/* Unaligned reads need the bounce buffer: it must hold a full page */
	if (((start_offset != 0U) || (length % nand_dev.page_size) != 0U) &&
	    (sizeof(scratch_buff) < nand_dev.page_size)) {
		return -EINVAL;
	}

	while (block <= end_block) {
		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			/* Skip the block: shift the whole window down by one */
			uint32_t max_block =
				nand_dev.size / nand_dev.block_size;

			block++;
			end_block++;

			if ((block < max_block) && (end_block < max_block)) {
				continue;
			}

			/* Ran off the end of the device while skipping */
			return -EIO;
		}

		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) ||
			    (length < nand_dev.page_size)) {
				/* Partial page: read into scratch, then copy */
				ret = nand_dev.mtd_read_page(
					&nand_dev,
					(block * nb_pages) + page,
					(uintptr_t)scratch_buff);
				if (ret != 0) {
					return ret;
				}

				bytes_read = MIN((size_t)(nand_dev.page_size -
							  start_offset),
						 length);

				memcpy((uint8_t *)buffer,
				       scratch_buff + start_offset,
				       bytes_read);

				/* Only the first page can start mid-page */
				start_offset = 0U;
			} else {
				/* Aligned full page: read straight into buffer */
				ret = nand_dev.mtd_read_page(&nand_dev,
						(block * nb_pages) + page,
						buffer);
				if (ret != 0) {
					return ret;
				}

				bytes_read = nand_dev.page_size;
			}

			length -= bytes_read;
			buffer += bytes_read;
			*length_read += bytes_read;

			if (length == 0U) {
				break;
			}
		}

		page_start = 0U;
		block++;
	}

	return 0;
}
/* Accessor for the singleton nand_device shared by the NAND frameworks. */
struct nand_device *get_nand_device(void)
{
	return &nand_dev;
}
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <platform_def.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/raw_nand.h>
#include <lib/utils.h>
/* READ ID address used to fetch the ONFI signature */
#define ONFI_SIGNATURE_ADDR 0x20U
/* CRC calculation (ONFI parameter page CRC-16) */
#define CRC_POLYNOM 0x8005U
#define CRC_INIT_VALUE 0x4F4EU
/* Status register */
#define NAND_STATUS_READY BIT(6)
#define SZ_128M 0x08000000U
#define SZ_512 0x200U

/* Singleton raw NAND instance: controller ops plus the shared nand_device */
static struct rawnand_device rawnand_dev;
#pragma weak plat_get_raw_nand_data
/*
 * Platform hook called during init to override/complete the detected
 * NAND parameters. Weak default: nothing to override, report success.
 */
int plat_get_raw_nand_data(struct rawnand_device *device)
{
	return 0;
}
static int nand_send_cmd(uint8_t cmd, unsigned int tim)
{
struct nand_req req;
zeromem(&req, sizeof(struct nand_req));
req.nand = rawnand_dev.nand_dev;
req.type = NAND_REQ_CMD | cmd;
req.inst_delay = tim;
return rawnand_dev.ops->exec(&req);
}
static int nand_send_addr(uint8_t addr, unsigned int tim)
{
struct nand_req req;
zeromem(&req, sizeof(struct nand_req));
req.nand = rawnand_dev.nand_dev;
req.type = NAND_REQ_ADDR;
req.addr = &addr;
req.inst_delay = tim;
return rawnand_dev.ops->exec(&req);
}
static int nand_send_wait(unsigned int delay, unsigned int tim)
{
struct nand_req req;
zeromem(&req, sizeof(struct nand_req));
req.nand = rawnand_dev.nand_dev;
req.type = NAND_REQ_WAIT;
req.inst_delay = tim;
req.delay_ms = delay;
return rawnand_dev.ops->exec(&req);
}
static int nand_read_data(uint8_t *data, unsigned int length, bool use_8bit)
{
struct nand_req req;
zeromem(&req, sizeof(struct nand_req));
req.nand = rawnand_dev.nand_dev;
req.type = NAND_REQ_DATAIN | (use_8bit ? NAND_REQ_BUS_WIDTH_8 : 0U);
req.addr = data;
req.length = length;
return rawnand_dev.ops->exec(&req);
}
/*
 * Issue CHANGE READ COLUMN to move the read pointer within the
 * currently loaded page, then read @len bytes into @buffer.
 * On a 16-bit bus the column address is expressed in words.
 */
int nand_change_read_column_cmd(unsigned int offset, uintptr_t buffer,
				unsigned int len)
{
	int ret;
	uint8_t addr[2];
	unsigned int i;

	ret = nand_send_cmd(NAND_CMD_CHANGE_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	/* Two column address cycles, LSB first */
	addr[0] = offset;
	addr[1] = offset >> 8;

	for (i = 0; i < 2U; i++) {
		ret = nand_send_addr(addr[i], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_CHANGE_2ND, NAND_TCCS_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data((uint8_t *)buffer, len, false);
}
/*
 * Issue READ PAGE for @page, starting at column @offset, and read @len
 * bytes into @buffer (the data phase is skipped when @buffer is 0).
 *
 * Devices larger than 128 MB need a third row-address cycle. On a
 * 16-bit bus the column address is expressed in words.
 */
int nand_read_page_cmd(unsigned int page, unsigned int offset,
		       uintptr_t buffer, unsigned int len)
{
	uint8_t addr[5];
	uint8_t i = 0U;
	uint8_t j;
	int ret;

	VERBOSE(">%s page %u offset %u buffer 0x%lx\n", __func__, page, offset,
		buffer);

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	/* Two column cycles, then two or three row cycles, LSB first */
	addr[i++] = offset;
	addr[i++] = offset >> 8;
	addr[i++] = page;
	addr[i++] = page >> 8;
	if (rawnand_dev.nand_dev->size > SZ_128M) {
		addr[i++] = page >> 16;
	}

	ret = nand_send_cmd(NAND_CMD_READ_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	for (j = 0U; j < i; j++) {
		ret = nand_send_addr(addr[j], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_READ_2ND, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	/* Wait for the page to be transferred into the data register */
	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (buffer != 0U) {
		ret = nand_read_data((uint8_t *)buffer, len, false);
	}

	return ret;
}
/*
 * Read the NAND status register. @status may be NULL to only switch the
 * device into status-read mode without fetching a byte.
 */
static int nand_status(uint8_t *status)
{
	int ret = nand_send_cmd(NAND_CMD_STATUS, NAND_TWHR_MIN);

	if (ret != 0) {
		return ret;
	}

	if (status == NULL) {
		return 0;
	}

	return nand_read_data(status, 1U, true);
}
/*
 * Poll the NAND status register until the ready bit is set or @delay
 * microseconds elapse. On success the device is switched back to read
 * mode before returning.
 */
int nand_wait_ready(unsigned long delay)
{
	uint8_t status;
	int ret;
	uint64_t timeout;

	/* Wait before reading status */
	udelay(1);

	/* Enter status-read mode once; the loop then only reads data bytes */
	ret = nand_status(NULL);
	if (ret != 0) {
		return ret;
	}

	timeout = timeout_init_us(delay);
	while (!timeout_elapsed(timeout)) {
		ret = nand_read_data(&status, 1U, true);
		if (ret != 0) {
			return ret;
		}

		if ((status & NAND_STATUS_READY) != 0U) {
			/* Leave status mode: restore read mode */
			return nand_send_cmd(NAND_CMD_READ_1ST, 0U);
		}

		udelay(10);
	}

	return -ETIMEDOUT;
}
#if NAND_ONFI_DETECT
/*
 * Compute the ONFI parameter page CRC-16 (polynomial 0x8005, MSB first)
 * over @data_len bytes, seeded with @crc, and return the result.
 */
static uint16_t nand_check_crc(uint16_t crc, uint8_t *data_in,
			       unsigned int data_len)
{
	uint32_t idx;
	uint32_t mask;

	for (idx = 0U; idx < data_len; idx++) {
		uint8_t byte = data_in[idx];

		for (mask = BIT(7); mask != 0U; mask >>= 1) {
			uint32_t feedback = crc & BIT(15);

			crc <<= 1;
			if ((byte & mask) != 0U) {
				feedback ^= BIT(15);
			}
			if (feedback != 0U) {
				crc ^= CRC_POLYNOM;
			}
		}

		crc &= GENMASK(15, 0);
	}

	return crc;
}
/* READ ID at address @addr, returning @size bytes into @id. */
static int nand_read_id(uint8_t addr, uint8_t *id, unsigned int size)
{
	int ret = nand_send_cmd(NAND_CMD_READID, 0U);

	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data(id, size, true);
}
/* Reset the NAND and wait until the reset sequence completes. */
static int nand_reset(void)
{
	int ret = nand_send_cmd(NAND_CMD_RESET, NAND_TWB_MAX);

	if (ret != 0) {
		return ret;
	}

	return nand_send_wait(PSEC_TO_MSEC(NAND_TRST_MAX), 0U);
}
/*
 * Read and validate the ONFI parameter page, then fill in the
 * nand_device geometry (bus width, page/block/device size) and ECC
 * requirement from it.
 */
static int nand_read_param_page(void)
{
	struct nand_param_page page;
	uint8_t addr = 0U;
	int ret;

	ret = nand_send_cmd(NAND_CMD_READ_PARAM_PAGE, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	ret = nand_read_data((uint8_t *)&page, sizeof(page), true);
	if (ret != 0) {
		return ret;
	}

	/* The parameter page must start with the "ONFI" signature */
	if (strncmp((char *)&page.page_sig, "ONFI", 4) != 0) {
		WARN("Error ONFI detection\n");
		return -EINVAL;
	}

	/* The CRC covers the first 254 bytes of the page */
	if (nand_check_crc(CRC_INIT_VALUE, (uint8_t *)&page, 254U) !=
	    page.crc16) {
		WARN("Error reading param\n");
		return -EINVAL;
	}

	if ((page.features & ONFI_FEAT_BUS_WIDTH_16) != 0U) {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_16;
	} else {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_8;
	}

	rawnand_dev.nand_dev->block_size = page.num_pages_per_blk *
					   page.bytes_per_page;
	rawnand_dev.nand_dev->page_size = page.bytes_per_page;
	rawnand_dev.nand_dev->size = page.num_pages_per_blk *
				     page.bytes_per_page *
				     page.num_blk_in_lun * page.num_lun;

	/* 0xFF means vendor-specific ECC: keep the existing defaults */
	if (page.nb_ecc_bits != GENMASK_32(7, 0)) {
		rawnand_dev.nand_dev->ecc.max_bit_corr = page.nb_ecc_bits;
		rawnand_dev.nand_dev->ecc.size = SZ_512;
	}

	VERBOSE("Page size %u, block_size %u, Size %llu, ecc %u, buswidth %u\n",
		rawnand_dev.nand_dev->page_size,
		rawnand_dev.nand_dev->block_size, rawnand_dev.nand_dev->size,
		rawnand_dev.nand_dev->ecc.max_bit_corr,
		rawnand_dev.nand_dev->buswidth);

	return 0;
}
/* Reset the NAND and probe for the ONFI signature via READ ID at 0x20. */
static int detect_onfi(void)
{
	char sig[4];
	int ret = nand_reset();

	if (ret != 0) {
		return ret;
	}

	ret = nand_read_id(ONFI_SIGNATURE_ADDR, (uint8_t *)sig, sizeof(sig));
	if (ret != 0) {
		return ret;
	}

	if (strncmp(sig, "ONFI", sizeof(sig)) != 0) {
		WARN("NAND Non ONFI detected\n");
		return -ENODEV;
	}

	return nand_read_param_page();
}
#endif
/*
 * Check the factory bad block markers of @block.
 *
 * The marker lives in the first spare-area bytes (column = page size)
 * of the first two pages of each block; any byte other than 0xFF flags
 * the block as bad.
 *
 * Returns 1 if the block is bad, 0 if good, or a negative error code
 * on read failure.
 */
static int nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = rawnand_dev.nand_dev->block_size /
					 rawnand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	uint8_t page;
	int ret;

	for (page = 0U; page < 2U; page++) {
		/*
		 * Fix: read the first AND the second page of the block.
		 * The previous code ignored the loop counter and read
		 * page 0 twice, never checking page 1's marker.
		 */
		ret = nand_read_page_cmd((block * nbpages_per_block) + page,
					 rawnand_dev.nand_dev->page_size,
					 (uintptr_t)bbm_marker,
					 sizeof(bbm_marker));
		if (ret != 0) {
			return ret;
		}

		if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
		    (bbm_marker[1] != GENMASK_32(7, 0))) {
			WARN("Block %u is bad\n", block);
			return 1;
		}
	}

	return 0;
}
/* mtd_read_page hook for raw NAND: plain whole-page READ PAGE. */
static int nand_mtd_read_page_raw(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return nand_read_page_cmd(page, 0U, buffer,
				  rawnand_dev.nand_dev->page_size);
}
/* Register the NAND controller ops (called by the controller driver). */
void nand_raw_ctrl_init(const struct nand_ctrl_ops *ops)
{
	rawnand_dev.ops = ops;
}
/*
 * Probe and initialize the raw NAND device.
 *
 * Outputs the total device size in bytes (@size) and the erase block
 * size in bytes (@erase_size). The controller ops must have been
 * registered first via nand_raw_ctrl_init().
 *
 * Returns 0 on success or a negative errno value.
 */
int nand_raw_init(unsigned long long *size, unsigned int *erase_size)
{
	rawnand_dev.nand_dev = get_nand_device();
	if (rawnand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	rawnand_dev.nand_dev->mtd_block_is_bad = nand_mtd_block_is_bad;
	rawnand_dev.nand_dev->mtd_read_page = nand_mtd_read_page_raw;
	rawnand_dev.nand_dev->ecc.mode = NAND_ECC_NONE;

	/*
	 * Fix: also guard against a missing nand_raw_ctrl_init() call;
	 * the previous code dereferenced rawnand_dev.ops unconditionally.
	 */
	if ((rawnand_dev.ops == NULL) ||
	    (rawnand_dev.ops->setup == NULL) ||
	    (rawnand_dev.ops->exec == NULL)) {
		return -ENODEV;
	}

#if NAND_ONFI_DETECT
	/* ONFI failure is not fatal: the platform hook below may fill in data */
	if (detect_onfi() != 0) {
		WARN("Detect ONFI failed\n");
	}
#endif

	if (plat_get_raw_nand_data(&rawnand_dev) != 0) {
		return -EINVAL;
	}

	assert((rawnand_dev.nand_dev->page_size != 0U) &&
	       (rawnand_dev.nand_dev->block_size != 0U) &&
	       (rawnand_dev.nand_dev->size != 0U));

	*size = rawnand_dev.nand_dev->size;
	*erase_size = rawnand_dev.nand_dev->block_size;

	rawnand_dev.ops->setup(rawnand_dev.nand_dev);

	return 0;
}
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <platform_def.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/spi_nand.h>
#include <lib/utils.h>
#define SPI_NAND_MAX_ID_LEN 4U
/* Ready-polling timeout: 400 ms */
#define DELAY_US_400MS 400000U
#define MACRONIX_ID 0xC2U

/* Singleton SPI-NAND instance sharing the common nand_device */
static struct spinand_device spinand_dev;
#pragma weak plat_get_spi_nand_data
/*
 * Platform hook to provide/override SPI-NAND geometry and the read
 * cache op. Weak default: nothing to override, report success.
 */
int plat_get_spi_nand_data(struct spinand_device *device)
{
	return 0;
}
/*
 * Single-byte GET_FEATURE/SET_FEATURE transaction on register @reg.
 * @read_reg selects GET (true) or SET (false); @val is the data byte,
 * transferred in direction @dir.
 */
static int spi_nand_reg(bool read_reg, uint8_t reg, uint8_t *val,
			enum spi_mem_data_dir dir)
{
	struct spi_mem_op op;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = read_reg ? SPI_NAND_OP_GET_FEATURE :
				   SPI_NAND_OP_SET_FEATURE;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.nbytes = 1U;
	op.addr.val = reg;
	op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.dir = dir;
	op.data.nbytes = 1U;
	op.data.buf = val;

	return spi_mem_exec_op(&op);
}
/* GET FEATURE: read configuration/status register @reg into @val. */
static int spi_nand_read_reg(uint8_t reg, uint8_t *val)
{
	return spi_nand_reg(true, reg, val, SPI_MEM_DATA_IN);
}

/* SET FEATURE: write @val to configuration register @reg. */
static int spi_nand_write_reg(uint8_t reg, uint8_t val)
{
	return spi_nand_reg(false, reg, &val, SPI_MEM_DATA_OUT);
}
/*
 * Read-modify-write the cached configuration register: clear @mask
 * then OR in @val. The bus access is skipped when nothing changes, and
 * the cache is only updated once the write succeeds.
 */
static int spi_nand_update_cfg(uint8_t mask, uint8_t val)
{
	uint8_t new_cfg = (spinand_dev.cfg_cache & ~mask) | val;
	int ret;

	if (new_cfg == spinand_dev.cfg_cache) {
		return 0;
	}

	ret = spi_nand_write_reg(SPI_NAND_REG_CFG, new_cfg);
	if (ret == 0) {
		spinand_dev.cfg_cache = new_cfg;
	}

	return ret;
}
/* Toggle the on-die ECC enable bit in the configuration register. */
static int spi_nand_ecc_enable(bool enable)
{
	return spi_nand_update_cfg(SPI_NAND_CFG_ECC_EN,
				   enable ? SPI_NAND_CFG_ECC_EN : 0U);
}
/*
 * Update the Quad Enable (QE) configuration bit. Only Macronix devices
 * carry a QE bit here; it is set when the read-from-cache op uses four
 * data lines and cleared otherwise.
 */
static int spi_nand_quad_enable(uint8_t manufacturer_id)
{
	bool use_quad;

	if (manufacturer_id != MACRONIX_ID) {
		return 0;
	}

	use_quad = (spinand_dev.spi_read_cache_op.data.buswidth ==
		    SPI_MEM_BUSWIDTH_4_LINE);

	return spi_nand_update_cfg(SPI_NAND_CFG_QE,
				   use_quad ? SPI_NAND_CFG_QE : 0U);
}
/*
 * Poll the status register until the BUSY bit clears or 400 ms elapse.
 * The last value read is returned through @status.
 */
static int spi_nand_wait_ready(uint8_t *status)
{
	uint64_t deadline = timeout_init_us(DELAY_US_400MS);

	while (!timeout_elapsed(deadline)) {
		int ret = spi_nand_read_reg(SPI_NAND_REG_STATUS, status);

		if (ret != 0) {
			return ret;
		}

		VERBOSE("%s Status %x\n", __func__, *status);

		if ((*status & SPI_NAND_STATUS_BUSY) == 0U) {
			return 0;
		}
	}

	return -ETIMEDOUT;
}
static int spi_nand_reset(void)
{
struct spi_mem_op op;
uint8_t status;
int ret;
zeromem(&op, sizeof(struct spi_mem_op));
op.cmd.opcode = SPI_NAND_OP_RESET;
op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
ret = spi_mem_exec_op(&op);
if (ret != 0) {
return ret;
}
return spi_nand_wait_ready(&status);
}
static int spi_nand_read_id(uint8_t *id)
{
struct spi_mem_op op;
zeromem(&op, sizeof(struct spi_mem_op));
op.cmd.opcode = SPI_NAND_OP_READ_ID;
op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
op.data.dir = SPI_MEM_DATA_IN;
op.data.nbytes = SPI_NAND_MAX_ID_LEN;
op.data.buf = id;
op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
return spi_mem_exec_op(&op);
}
/*
 * Issue PAGE READ to load @page (a page index) into the device's cache
 * register.
 *
 * NOTE(review): block_nb divides a page index by block_size (a byte
 * count) and page_nb subtracts block_nb * page_size; for typical parts
 * block_nb evaluates to 0 so the row address equals @page, but the
 * formula looks dimensionally inconsistent — verify against the
 * device datasheet.
 */
static int spi_nand_load_page(unsigned int page)
{
	struct spi_mem_op op;
	uint32_t block_nb = page / spinand_dev.nand_dev->block_size;
	uint32_t page_nb = page - (block_nb * spinand_dev.nand_dev->page_size);
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_sh = __builtin_ctz(nbpages_per_block) + 1U;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_LOAD_PAGE;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.val = (block_nb << block_sh) | page_nb;
	op.addr.nbytes = 3U;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	return spi_mem_exec_op(&op);
}
/*
 * Read @len bytes at column @offset from the device cache register into
 * @buffer, using the (possibly quad) read-from-cache op.
 *
 * For multi-plane devices, bit (ctz(page_size) + 1) of the column
 * address is set for odd-numbered blocks — presumably the plane-select
 * bit; confirm against the device datasheet.
 */
static int spi_nand_read_from_cache(unsigned int page, unsigned int offset,
				    uint8_t *buffer, unsigned int len)
{
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_nb = page / nbpages_per_block;
	uint32_t page_sh = __builtin_ctz(spinand_dev.nand_dev->page_size) + 1U;

	spinand_dev.spi_read_cache_op.addr.val = offset;

	if ((spinand_dev.nand_dev->nb_planes > 1U) && ((block_nb % 2U) == 1U)) {
		spinand_dev.spi_read_cache_op.addr.val |= 1U << page_sh;
	}

	spinand_dev.spi_read_cache_op.data.buf = buffer;
	spinand_dev.spi_read_cache_op.data.nbytes = len;

	return spi_mem_exec_op(&spinand_dev.spi_read_cache_op);
}
/*
 * Read @len bytes at column @offset of @page into @buffer.
 *
 * When @ecc_enabled is set, the on-die ECC engine is used and an
 * uncorrectable error (status bit checked after the cache read) is
 * reported as -EBADMSG.
 */
static int spi_nand_read_page(unsigned int page, unsigned int offset,
			      uint8_t *buffer, unsigned int len,
			      bool ecc_enabled)
{
	uint8_t status;
	int ret;

	ret = spi_nand_ecc_enable(ecc_enabled);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_load_page(page);
	if (ret != 0) {
		return ret;
	}

	/* status captured here carries the ECC result of the page load */
	ret = spi_nand_wait_ready(&status);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_from_cache(page, offset, buffer, len);
	if (ret != 0) {
		return ret;
	}

	if (ecc_enabled && ((status & SPI_NAND_STATUS_ECC_UNCOR) != 0U)) {
		return -EBADMSG;
	}

	return 0;
}
/*
 * Check the bad block marker of @block: the first two spare-area bytes
 * (column = page size) of the block's first page must both read 0xFF.
 * ECC is disabled for this spare-area access.
 *
 * Returns 1 if bad, 0 if good, or a negative errno value on error.
 */
static int spi_nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = spinand_dev.nand_dev->block_size /
					 spinand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	int ret;

	ret = spi_nand_read_page(block * nbpages_per_block,
				 spinand_dev.nand_dev->page_size,
				 bbm_marker, sizeof(bbm_marker), false);
	if (ret != 0) {
		return ret;
	}

	if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
	    (bbm_marker[1] != GENMASK_32(7, 0))) {
		/* Fix: block is unsigned, use %u rather than %i */
		WARN("Block %u is bad\n", block);
		return 1;
	}

	return 0;
}
/* mtd_read_page hook: whole-page read with on-die ECC enabled. */
static int spi_nand_mtd_read_page(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return spi_nand_read_page(page, 0, (uint8_t *)buffer,
				  spinand_dev.nand_dev->page_size, true);
}
/*
 * Probe and initialize the SPI-NAND device.
 *
 * Resets the device, reads its ID, caches the configuration register
 * and enables quad mode when appropriate. The device geometry must be
 * provided via the shared nand_device (platform data). Outputs total
 * size in bytes (@size) and erase-block size in bytes (@erase_size).
 *
 * Returns 0 on success or a negative errno value.
 */
int spi_nand_init(unsigned long long *size, unsigned int *erase_size)
{
	uint8_t id[SPI_NAND_MAX_ID_LEN];
	int ret;

	spinand_dev.nand_dev = get_nand_device();
	if (spinand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	spinand_dev.nand_dev->mtd_block_is_bad = spi_nand_mtd_block_is_bad;
	spinand_dev.nand_dev->mtd_read_page = spi_nand_mtd_read_page;
	spinand_dev.nand_dev->nb_planes = 1U;

	/* Default single-line READ FROM CACHE op; platform data may widen it */
	spinand_dev.spi_read_cache_op.cmd.opcode = SPI_NAND_OP_READ_FROM_CACHE;
	spinand_dev.spi_read_cache_op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.addr.nbytes = 2U;
	spinand_dev.spi_read_cache_op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.dummy.nbytes = 1U;
	spinand_dev.spi_read_cache_op.dummy.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	if (plat_get_spi_nand_data(&spinand_dev) != 0) {
		return -EINVAL;
	}

	ret = spi_nand_reset();
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_id(id);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_reg(SPI_NAND_REG_CFG, &spinand_dev.cfg_cache);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_quad_enable(id[0]);
	if (ret != 0) {
		return ret;
	}

	VERBOSE("SPI_NAND Detected ID 0x%x 0x%x\n", id[0], id[1]);

	/* Fix: match the format specifiers to the unsigned argument types */
	VERBOSE("Page size %u, Block size %u, size %llu\n",
		spinand_dev.nand_dev->page_size,
		spinand_dev.nand_dev->block_size,
		spinand_dev.nand_dev->size);

	*size = spinand_dev.nand_dev->size;
	*erase_size = spinand_dev.nand_dev->block_size;

	return 0;
}
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/spi_nor.h>
#include <lib/utils.h>
/* Status/configuration register bits */
#define SR_WIP BIT(0) /* Write in progress */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
/* Defined IDs for supported memories */
#define SPANSION_ID 0x01U
#define MACRONIX_ID 0xC2U
#define MICRON_ID 0x2CU
/* 16 MB bank granularity of the Bank Address Register scheme */
#define BANK_SIZE 0x1000000U
#define SPI_READY_TIMEOUT_US 40000U

/* Singleton SPI-NOR instance */
static struct nor_device nor_dev;
#pragma weak plat_get_nor_data
/*
 * Platform hook to provide the NOR characteristics (at least its size)
 * and read op. Weak default: nothing to provide, report success.
 */
int plat_get_nor_data(struct nor_device *device)
{
	return 0;
}
/*
 * Register transaction: send opcode @reg, then transfer @len bytes of
 * data in direction @dir (no address or dummy phase).
 */
static int spi_nor_reg(uint8_t reg, uint8_t *buf, size_t len,
		       enum spi_mem_data_dir dir)
{
	struct spi_mem_op op;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.cmd.opcode = reg;
	op.data.dir = dir;
	op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.buf = buf;
	op.data.nbytes = len;

	return spi_mem_exec_op(&op);
}
/* Read one device ID byte. */
static inline int spi_nor_read_id(uint8_t *id)
{
	return spi_nor_reg(SPI_NOR_OP_READ_ID, id, 1U, SPI_MEM_DATA_IN);
}

/* Read the configuration register. */
static inline int spi_nor_read_cr(uint8_t *cr)
{
	return spi_nor_reg(SPI_NOR_OP_READ_CR, cr, 1U, SPI_MEM_DATA_IN);
}

/* Read the status register. */
static inline int spi_nor_read_sr(uint8_t *sr)
{
	return spi_nor_reg(SPI_NOR_OP_READ_SR, sr, 1U, SPI_MEM_DATA_IN);
}

/* Read the flag status register. */
static inline int spi_nor_read_fsr(uint8_t *fsr)
{
	return spi_nor_reg(SPI_NOR_OP_READ_FSR, fsr, 1U, SPI_MEM_DATA_IN);
}

/* Set the write enable latch. */
static inline int spi_nor_write_en(void)
{
	return spi_nor_reg(SPI_NOR_OP_WREN, NULL, 0U, SPI_MEM_DATA_OUT);
}
/*
 * Check if device is ready.
 *
 * Return 0 if ready, 1 if busy or a negative error code otherwise
 */
static int spi_nor_ready(void)
{
	uint8_t sr;
	int ret;

	ret = spi_nor_read_sr(&sr);
	if (ret != 0) {
		return ret;
	}

	if ((nor_dev.flags & SPI_NOR_USE_FSR) != 0U) {
		uint8_t fsr;

		ret = spi_nor_read_fsr(&fsr);
		if (ret != 0) {
			return ret;
		}

		/* Ready only when FSR says ready AND SR write-in-progress is clear */
		if (((fsr & FSR_READY) != 0U) && ((sr & SR_WIP) == 0U)) {
			return 0;
		}

		return 1;
	}

	return ((sr & SR_WIP) != 0U) ? 1 : 0;
}
/* Poll spi_nor_ready() until ready (0), error (<0), or timeout. */
static int spi_nor_wait_ready(void)
{
	uint64_t deadline = timeout_init_us(SPI_READY_TIMEOUT_US);
	int ret;

	while (!timeout_elapsed(deadline)) {
		ret = spi_nor_ready();
		if (ret <= 0) {
			/* 0 means ready; negative is a bus error */
			return ret;
		}
	}

	return -ETIMEDOUT;
}
/*
 * Macronix-style quad enable: set the QE bit (SR bit 6) if it is not
 * already set, then read back to verify the write took effect.
 *
 * Returns 0 on success, -EINVAL if the bit could not be set, or another
 * negative errno value on bus error.
 */
static int spi_nor_macronix_quad_enable(void)
{
	uint8_t sr;
	int ret;

	ret = spi_nor_read_sr(&sr);
	if (ret != 0) {
		return ret;
	}

	/*
	 * Fix: skip only when QE is ALREADY set. The previous test was
	 * inverted (== 0U): it returned early exactly when quad mode
	 * still had to be enabled, and rewrote the status register when
	 * the bit was already set — contradicting the verify-read below.
	 */
	if ((sr & SR_QUAD_EN_MX) != 0U) {
		return 0;
	}

	ret = spi_nor_write_en();
	if (ret != 0) {
		return ret;
	}

	sr |= SR_QUAD_EN_MX;

	ret = spi_nor_reg(SPI_NOR_OP_WRSR, &sr, 1, SPI_MEM_DATA_OUT);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nor_wait_ready();
	if (ret != 0) {
		return ret;
	}

	/* Read back and confirm that the bit stuck */
	ret = spi_nor_read_sr(&sr);
	if ((ret != 0) || ((sr & SR_QUAD_EN_MX) == 0U)) {
		return -EINVAL;
	}

	return 0;
}
/*
 * Write the status and configuration registers in one WRSR transaction
 * (@sr_cr = {SR, CR}) and wait for the write to complete.
 */
static int spi_nor_write_sr_cr(uint8_t *sr_cr)
{
	int ret = spi_nor_write_en();

	if (ret != 0) {
		return ret;
	}

	if (spi_nor_reg(SPI_NOR_OP_WRSR, sr_cr, 2, SPI_MEM_DATA_OUT) != 0) {
		return -EINVAL;
	}

	return spi_nor_wait_ready();
}
/*
 * Spansion-style quad enable: set the QE bit (CR bit 1) via a combined
 * SR+CR write, then read back to confirm it sticks.
 */
static int spi_nor_quad_enable(void)
{
	uint8_t sr_cr[2];
	int ret;

	ret = spi_nor_read_cr(&sr_cr[1]);
	if (ret != 0) {
		return ret;
	}

	/* Already enabled: nothing to do */
	if ((sr_cr[1] & CR_QUAD_EN_SPAN) != 0U) {
		return 0;
	}

	sr_cr[1] |= CR_QUAD_EN_SPAN;

	/* WRSR writes SR and CR together: preserve the current SR value */
	ret = spi_nor_read_sr(&sr_cr[0]);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nor_write_sr_cr(sr_cr);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nor_read_cr(&sr_cr[1]);
	if ((ret != 0) || ((sr_cr[1] & CR_QUAD_EN_SPAN) == 0U)) {
		return -EINVAL;
	}

	return 0;
}
/*
 * Reset the Bank Address Register to bank 0 if another bank is
 * currently selected.
 *
 * NOTE(review): the cached selected_bank is cleared before the register
 * write, so a write failure leaves the cache out of sync with the
 * device — confirm this is acceptable for the error paths using it.
 */
static int spi_nor_clean_bar(void)
{
	int ret;

	if (nor_dev.selected_bank == 0U) {
		return 0;
	}

	nor_dev.selected_bank = 0U;

	ret = spi_nor_write_en();
	if (ret != 0) {
		return ret;
	}

	return spi_nor_reg(nor_dev.bank_write_cmd, &nor_dev.selected_bank,
			   1, SPI_MEM_DATA_OUT);
}
/*
 * Program the Bank Address Register so @offset becomes reachable with
 * 3-byte addressing. No-op when the right bank is already selected; the
 * cache is only updated after a successful write.
 */
static int spi_nor_write_bar(uint32_t offset)
{
	uint8_t bank = offset / BANK_SIZE;
	int ret;

	if (bank == nor_dev.selected_bank) {
		return 0;
	}

	ret = spi_nor_write_en();
	if (ret != 0) {
		return ret;
	}

	ret = spi_nor_reg(nor_dev.bank_write_cmd, &bank, 1, SPI_MEM_DATA_OUT);
	if (ret != 0) {
		return ret;
	}

	nor_dev.selected_bank = bank;

	return 0;
}
/* Read the Bank Address Register into the cached selected_bank. */
static int spi_nor_read_bar(void)
{
	uint8_t bank = 0;
	int ret;

	ret = spi_nor_reg(nor_dev.bank_read_cmd, &bank, 1, SPI_MEM_DATA_IN);
	if (ret != 0) {
		return ret;
	}

	nor_dev.selected_bank = bank;

	return 0;
}
/*
 * Read @length bytes at @offset from the SPI-NOR into @buffer.
 *
 * On devices larger than 16 MB (SPI_NOR_USE_BANK), reads are split at
 * bank boundaries, the Bank Address Register is programmed before each
 * chunk and cleared at the end. *length_read returns the number of
 * bytes actually read.
 */
int spi_nor_read(unsigned int offset, uintptr_t buffer, size_t length,
		 size_t *length_read)
{
	size_t remain_len;
	int ret;

	*length_read = 0;
	nor_dev.read_op.addr.val = offset;
	nor_dev.read_op.data.buf = (void *)buffer;

	/* Fix: offset is unsigned, use %u rather than %i */
	VERBOSE("%s offset %u length %zu\n", __func__, offset, length);

	while (length != 0U) {
		if ((nor_dev.flags & SPI_NOR_USE_BANK) != 0U) {
			ret = spi_nor_write_bar(nor_dev.read_op.addr.val);
			if (ret != 0) {
				return ret;
			}

			/* Clamp the chunk to the end of the current bank */
			remain_len = (BANK_SIZE * (nor_dev.selected_bank + 1)) -
				     nor_dev.read_op.addr.val;
			nor_dev.read_op.data.nbytes = MIN(length, remain_len);
		} else {
			nor_dev.read_op.data.nbytes = length;
		}

		ret = spi_mem_exec_op(&nor_dev.read_op);
		if (ret != 0) {
			spi_nor_clean_bar();
			return ret;
		}

		length -= nor_dev.read_op.data.nbytes;
		nor_dev.read_op.addr.val += nor_dev.read_op.data.nbytes;
		/* Fix: avoid arithmetic on void * (GCC extension) */
		nor_dev.read_op.data.buf = (uint8_t *)nor_dev.read_op.data.buf +
					   nor_dev.read_op.data.nbytes;
		*length_read += nor_dev.read_op.data.nbytes;
	}

	if ((nor_dev.flags & SPI_NOR_USE_BANK) != 0U) {
		ret = spi_nor_clean_bar();
		if (ret != 0) {
			return ret;
		}
	}

	return 0;
}
/*
 * Probe and initialize the SPI-NOR device.
 *
 * The platform hook must provide at least the device size; devices
 * larger than 16 MB use the Bank Address Register scheme. Quad mode is
 * enabled per-manufacturer when the read op uses four data lines.
 * Outputs the total size in bytes; @erase_size is not written here.
 */
int spi_nor_init(unsigned long long *size, unsigned int *erase_size)
{
	int ret = 0;
	uint8_t id;

	/* Default read command used */
	nor_dev.read_op.cmd.opcode = SPI_NOR_OP_READ;
	nor_dev.read_op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	nor_dev.read_op.addr.nbytes = 3U;
	nor_dev.read_op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	nor_dev.read_op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	nor_dev.read_op.data.dir = SPI_MEM_DATA_IN;

	if (plat_get_nor_data(&nor_dev) != 0) {
		return -EINVAL;
	}

	assert(nor_dev.size != 0);

	if (nor_dev.size > BANK_SIZE) {
		nor_dev.flags |= SPI_NOR_USE_BANK;
	}

	*size = nor_dev.size;

	ret = spi_nor_read_id(&id);
	if (ret != 0) {
		return ret;
	}

	if ((nor_dev.flags & SPI_NOR_USE_BANK) != 0U) {
		/* Bank register opcodes differ between vendors */
		switch (id) {
		case SPANSION_ID:
			nor_dev.bank_read_cmd = SPINOR_OP_BRRD;
			nor_dev.bank_write_cmd = SPINOR_OP_BRWR;
			break;
		default:
			nor_dev.bank_read_cmd = SPINOR_OP_RDEAR;
			nor_dev.bank_write_cmd = SPINOR_OP_WREAR;
			break;
		}
	}

	if (nor_dev.read_op.data.buswidth == 4U) {
		/* Micron parts need no explicit quad-enable sequence */
		switch (id) {
		case MACRONIX_ID:
			WARN("Enable Macronix quad support\n");
			ret = spi_nor_macronix_quad_enable();
			break;
		case MICRON_ID:
			break;
		default:
			ret = spi_nor_quad_enable();
			break;
		}
	}

	if ((ret == 0) && ((nor_dev.flags & SPI_NOR_USE_BANK) != 0U)) {
		/* Sync the cached bank with the device */
		ret = spi_nor_read_bar();
	}

	return ret;
}
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <libfdt.h>
#include <drivers/spi_mem.h>
#include <lib/utils_def.h>
#define SPI_MEM_DEFAULT_SPEED_HZ 100000U
/*
 * struct spi_slave - Representation of a SPI slave.
 *
 * @max_hz: Maximum speed for this slave in Hertz.
 * @cs: ID of the chip select connected to the slave.
 * @mode: SPI mode to use for this slave (see SPI mode flags).
 * @ops: Ops defined by the bus.
 */
struct spi_slave {
	unsigned int max_hz;
	unsigned int cs;
	unsigned int mode;
	const struct spi_bus_ops *ops;
};

/* Single slave instance: this framework supports exactly one SPI device */
static struct spi_slave spi_slave;
/*
 * Check whether the slave supports a transfer on @buswidth IO lines.
 *
 * @buswidth: Number of IO lines (1, 2 or 4).
 * @tx: true for a transmit phase, false for a receive phase.
 * Return: true if the configured SPI mode allows the requested width.
 */
static bool spi_mem_check_buswidth_req(uint8_t buswidth, bool tx)
{
	unsigned int mode = spi_slave.mode;
	unsigned int mask;

	/* Single-line transfers are always possible */
	if (buswidth == 1U) {
		return true;
	}

	if (buswidth == 2U) {
		/* Quad-capable links can also run dual transfers */
		mask = tx ? (SPI_TX_DUAL | SPI_TX_QUAD) :
			    (SPI_RX_DUAL | SPI_RX_QUAD);
		return (mode & mask) != 0U;
	}

	if (buswidth == 4U) {
		mask = tx ? SPI_TX_QUAD : SPI_RX_QUAD;
		return (mode & mask) != 0U;
	}

	return false;
}
/*
 * Check that every phase of @op (command, address, dummy, data) fits the
 * bus widths supported by the configured slave mode.
 *
 * Return: true if the operation can be executed, false otherwise.
 */
static bool spi_mem_supports_op(const struct spi_mem_op *op)
{
	/* The command phase is always transmitted */
	bool supported = spi_mem_check_buswidth_req(op->cmd.buswidth, true);

	if (supported && (op->addr.nbytes != 0U)) {
		supported = spi_mem_check_buswidth_req(op->addr.buswidth,
						       true);
	}

	if (supported && (op->dummy.nbytes != 0U)) {
		supported = spi_mem_check_buswidth_req(op->dummy.buswidth,
						       true);
	}

	if (supported && (op->data.nbytes != 0U)) {
		/* Data direction decides whether TX or RX widths apply */
		supported = spi_mem_check_buswidth_req(op->data.buswidth,
						       op->data.dir ==
						       SPI_MEM_DATA_OUT);
	}

	return supported;
}
/*
 * Push the slave's speed and mode settings down to the bus driver.
 *
 * Return: 0 on success, the bus driver's error code otherwise.
 */
static int spi_mem_set_speed_mode(void)
{
	const struct spi_bus_ops *bus_ops = spi_slave.ops;
	int err;

	err = bus_ops->set_speed(spi_slave.max_hz);
	if (err != 0) {
		VERBOSE("Cannot set speed (err=%d)\n", err);
		return err;
	}

	err = bus_ops->set_mode(spi_slave.mode);
	if (err != 0) {
		VERBOSE("Cannot set mode (err=%d)\n", err);
	}

	return err;
}
/*
 * Verify that every mandatory callback of @ops is provided.
 *
 * All missing callbacks are reported before returning, so a driver author
 * sees the full list in one run.
 *
 * Return: 0 if the ops are complete, -EINVAL otherwise.
 */
static int spi_mem_check_bus_ops(const struct spi_bus_ops *ops)
{
	bool complete = true;

	if (ops->claim_bus == NULL) {
		VERBOSE("Ops claim bus is not defined\n");
		complete = false;
	}

	if (ops->release_bus == NULL) {
		VERBOSE("Ops release bus is not defined\n");
		complete = false;
	}

	if (ops->exec_op == NULL) {
		VERBOSE("Ops exec op is not defined\n");
		complete = false;
	}

	if (ops->set_speed == NULL) {
		VERBOSE("Ops set speed is not defined\n");
		complete = false;
	}

	if (ops->set_mode == NULL) {
		VERBOSE("Ops set mode is not defined\n");
		complete = false;
	}

	return complete ? 0 : -EINVAL;
}
/*
 * spi_mem_exec_op() - Execute a memory operation.
 * @op: The memory operation to execute.
 *
 * This function first checks that @op is supported and then tries to execute
 * it: the bus is claimed for the duration of the operation and released
 * whether or not the driver's exec_op succeeds.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(const struct spi_mem_op *op)
{
	const struct spi_bus_ops *ops = spi_slave.ops;
	int ret;

	/* Fix: the trace previously printed "addqr" instead of "addr" */
	VERBOSE("%s: cmd:%x mode:%d.%d.%d.%d addr:%llx len:%x\n",
		__func__, op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	if (!spi_mem_supports_op(op)) {
		WARN("Error in spi_mem_support\n");
		return -ENOTSUP;
	}

	ret = ops->claim_bus(spi_slave.cs);
	if (ret != 0) {
		WARN("Error claim_bus\n");
		return ret;
	}

	ret = ops->exec_op(op);

	ops->release_bus();

	return ret;
}
/*
 * spi_mem_init_slave() - SPI slave device initialization.
 * @fdt: Pointer to the device tree blob.
 * @bus_node: Offset of the bus node.
 * @ops: The SPI bus ops defined.
 *
 * This function first checks that @ops are supported and then tries to find
 * a SPI slave device below @bus_node. Exactly one subnode (one chip) is
 * required. The chip select, maximum frequency and SPI mode flags are read
 * from the standard SPI device-tree properties and applied to the bus.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_init_slave(void *fdt, int bus_node, const struct spi_bus_ops *ops)
{
	int ret;
	int mode = 0;
	int nchips = 0;
	int bus_subnode = 0;
	const fdt32_t *cuint = NULL;

	ret = spi_mem_check_bus_ops(ops);
	if (ret != 0) {
		return ret;
	}

	/* Count the children: only a single slave per bus is supported */
	fdt_for_each_subnode(bus_subnode, fdt, bus_node) {
		nchips++;
	}

	if (nchips != 1) {
		ERROR("Only one SPI device is currently supported\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(bus_subnode, fdt, bus_node) {
		/* Get chip select */
		cuint = fdt_getprop(fdt, bus_subnode, "reg", NULL);
		if (cuint == NULL) {
			ERROR("Chip select not well defined\n");
			return -EINVAL;
		}
		spi_slave.cs = fdt32_to_cpu(*cuint);

		/* Get max slave frequency, with a safe fallback default */
		spi_slave.max_hz = SPI_MEM_DEFAULT_SPEED_HZ;
		cuint = fdt_getprop(fdt, bus_subnode,
				    "spi-max-frequency", NULL);
		if (cuint != NULL) {
			spi_slave.max_hz = fdt32_to_cpu(*cuint);
		}

		/* Get mode from the boolean DT properties */
		if ((fdt_getprop(fdt, bus_subnode, "spi-cpol", NULL)) != NULL) {
			mode |= SPI_CPOL;
		}
		if ((fdt_getprop(fdt, bus_subnode, "spi-cpha", NULL)) != NULL) {
			mode |= SPI_CPHA;
		}
		if ((fdt_getprop(fdt, bus_subnode, "spi-cs-high", NULL)) !=
		    NULL) {
			mode |= SPI_CS_HIGH;
		}
		if ((fdt_getprop(fdt, bus_subnode, "spi-3wire", NULL)) !=
		    NULL) {
			mode |= SPI_3WIRE;
		}
		if ((fdt_getprop(fdt, bus_subnode, "spi-half-duplex", NULL)) !=
		    NULL) {
			mode |= SPI_PREAMBLE;
		}

		/* Get dual/quad mode */
		cuint = fdt_getprop(fdt, bus_subnode, "spi-tx-bus-width", NULL);
		if (cuint != NULL) {
			switch (fdt32_to_cpu(*cuint)) {
			case 1U:
				break;
			case 2U:
				mode |= SPI_TX_DUAL;
				break;
			case 4U:
				mode |= SPI_TX_QUAD;
				break;
			default:
				/* Fix: fdt32_to_cpu() is unsigned, use %u */
				WARN("spi-tx-bus-width %u not supported\n",
				     fdt32_to_cpu(*cuint));
				return -EINVAL;
			}
		}

		cuint = fdt_getprop(fdt, bus_subnode, "spi-rx-bus-width", NULL);
		if (cuint != NULL) {
			switch (fdt32_to_cpu(*cuint)) {
			case 1U:
				break;
			case 2U:
				mode |= SPI_RX_DUAL;
				break;
			case 4U:
				mode |= SPI_RX_QUAD;
				break;
			default:
				/* Fix: fdt32_to_cpu() is unsigned, use %u */
				WARN("spi-rx-bus-width %u not supported\n",
				     fdt32_to_cpu(*cuint));
				return -EINVAL;
			}
		}

		spi_slave.mode = mode;
		spi_slave.ops = ops;
	}

	return spi_mem_set_speed_mode();
}
This diff is collapsed.
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
*/
#include <libfdt.h>
#include <platform_def.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/spi_mem.h>
#include <drivers/st/stm32_gpio.h>
#include <drivers/st/stm32mp_reset.h>
#include <lib/mmio.h>
#include <lib/utils_def.h>
/* QUADSPI registers */
#define QSPI_CR 0x00U
#define QSPI_DCR 0x04U
#define QSPI_SR 0x08U
#define QSPI_FCR 0x0CU
#define QSPI_DLR 0x10U
#define QSPI_CCR 0x14U
#define QSPI_AR 0x18U
#define QSPI_ABR 0x1CU
#define QSPI_DR 0x20U
#define QSPI_PSMKR 0x24U
#define QSPI_PSMAR 0x28U
#define QSPI_PIR 0x2CU
#define QSPI_LPTR 0x30U
/* QUADSPI control register */
#define QSPI_CR_EN BIT(0)
#define QSPI_CR_ABORT BIT(1)
#define QSPI_CR_DMAEN BIT(2)
#define QSPI_CR_TCEN BIT(3)
#define QSPI_CR_SSHIFT BIT(4)
#define QSPI_CR_DFM BIT(6)
#define QSPI_CR_FSEL BIT(7)
#define QSPI_CR_FTHRES_SHIFT 8U
#define QSPI_CR_TEIE BIT(16)
#define QSPI_CR_TCIE BIT(17)
#define QSPI_CR_FTIE BIT(18)
#define QSPI_CR_SMIE BIT(19)
#define QSPI_CR_TOIE BIT(20)
#define QSPI_CR_APMS BIT(22)
#define QSPI_CR_PMM BIT(23)
#define QSPI_CR_PRESCALER_MASK GENMASK_32(31, 24)
#define QSPI_CR_PRESCALER_SHIFT 24U
/* QUADSPI device configuration register */
#define QSPI_DCR_CKMODE BIT(0)
#define QSPI_DCR_CSHT_MASK GENMASK_32(10, 8)
#define QSPI_DCR_CSHT_SHIFT 8U
#define QSPI_DCR_FSIZE_MASK GENMASK_32(20, 16)
#define QSPI_DCR_FSIZE_SHIFT 16U
/* QUADSPI status register */
#define QSPI_SR_TEF BIT(0)
#define QSPI_SR_TCF BIT(1)
#define QSPI_SR_FTF BIT(2)
#define QSPI_SR_SMF BIT(3)
#define QSPI_SR_TOF BIT(4)
#define QSPI_SR_BUSY BIT(5)
/* QUADSPI flag clear register */
#define QSPI_FCR_CTEF BIT(0)
#define QSPI_FCR_CTCF BIT(1)
#define QSPI_FCR_CSMF BIT(3)
#define QSPI_FCR_CTOF BIT(4)
/* QUADSPI communication configuration register */
#define QSPI_CCR_DDRM BIT(31)
#define QSPI_CCR_DHHC BIT(30)
#define QSPI_CCR_SIOO BIT(28)
#define QSPI_CCR_FMODE_SHIFT 26U
#define QSPI_CCR_DMODE_SHIFT 24U
#define QSPI_CCR_DCYC_SHIFT 18U
#define QSPI_CCR_ABSIZE_SHIFT 16U
#define QSPI_CCR_ABMODE_SHIFT 14U
#define QSPI_CCR_ADSIZE_SHIFT 12U
#define QSPI_CCR_ADMODE_SHIFT 10U
#define QSPI_CCR_IMODE_SHIFT 8U
#define QSPI_CCR_IND_WRITE 0U
#define QSPI_CCR_IND_READ 1U
#define QSPI_CCR_MEM_MAP 3U
#define QSPI_MAX_CHIP 2U
#define QSPI_FIFO_TIMEOUT_US 30U
#define QSPI_CMD_TIMEOUT_US 1000U
#define QSPI_BUSY_TIMEOUT_US 100U
#define QSPI_ABT_TIMEOUT_US 100U
#define DT_QSPI_COMPAT "st,stm32f469-qspi"
#define FREQ_100MHZ 100000000U
/* Controller instance data, filled from the device tree at init time */
struct stm32_qspi_ctrl {
	uintptr_t reg_base;	/* Base address of the QSPI registers */
	uintptr_t mm_base;	/* Base address of the memory-mapped window */
	size_t mm_size;		/* Size of the memory-mapped window */
	unsigned long clock_id;	/* Clock identifier from the DT */
	unsigned int reset_id;	/* Reset line identifier from the DT */
};

static struct stm32_qspi_ctrl stm32_qspi;

/* Shorthand accessor for the controller register base address */
static uintptr_t qspi_base(void)
{
	return stm32_qspi.reg_base;
}
/*
 * Poll the status register until the BUSY flag clears.
 *
 * Return: 0 when the controller is idle, -ETIMEDOUT after
 * QSPI_BUSY_TIMEOUT_US microseconds.
 */
static int stm32_qspi_wait_for_not_busy(void)
{
	uint64_t deadline = timeout_init_us(QSPI_BUSY_TIMEOUT_US);

	for ( ; ; ) {
		if ((mmio_read_32(qspi_base() + QSPI_SR) &
		     QSPI_SR_BUSY) == 0U) {
			return 0;
		}

		if (timeout_elapsed(deadline)) {
			ERROR("%s: busy timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}
}
static int stm32_qspi_wait_cmd(const struct spi_mem_op *op)
{
int ret = 0;
uint64_t timeout;
if (op->data.nbytes == 0U) {
return stm32_qspi_wait_for_not_busy();
}
timeout = timeout_init_us(QSPI_CMD_TIMEOUT_US);
while ((mmio_read_32(qspi_base() + QSPI_SR) & QSPI_SR_TCF) == 0U) {
if (timeout_elapsed(timeout)) {
ret = -ETIMEDOUT;
break;
}
}
if (ret == 0) {
if ((mmio_read_32(qspi_base() + QSPI_SR) & QSPI_SR_TEF) != 0U) {
ERROR("%s: transfer error\n", __func__);
ret = -EIO;
}
} else {
ERROR("%s: cmd timeout\n", __func__);
}
/* Clear flags */
mmio_write_32(qspi_base() + QSPI_FCR, QSPI_FCR_CTCF | QSPI_FCR_CTEF);
return ret;
}
/* Read one byte from the data register at @addr into *val */
static void stm32_qspi_read_fifo(uint8_t *val, uintptr_t addr)
{
	*val = mmio_read_8(addr);
}
/* Write the byte *val to the data register at @addr */
static void stm32_qspi_write_fifo(uint8_t *val, uintptr_t addr)
{
	mmio_write_8(addr, *val);
}
/*
 * Transfer the data phase of @op byte by byte through the FIFO.
 *
 * The FIFO-threshold flag (FTF) is polled before each byte, with a fresh
 * timeout per byte.
 *
 * Return: 0 on success, -ETIMEDOUT if the FIFO never becomes ready.
 */
static int stm32_qspi_poll(const struct spi_mem_op *op)
{
	void (*xfer_byte)(uint8_t *val, uintptr_t addr);
	uint8_t *data = (uint8_t *)op->data.buf;
	uint32_t remaining;

	xfer_byte = (op->data.dir == SPI_MEM_DATA_IN) ?
		    stm32_qspi_read_fifo : stm32_qspi_write_fifo;

	for (remaining = op->data.nbytes; remaining != 0U; remaining--) {
		uint64_t deadline = timeout_init_us(QSPI_FIFO_TIMEOUT_US);

		while ((mmio_read_32(qspi_base() + QSPI_SR) &
			QSPI_SR_FTF) == 0U) {
			if (timeout_elapsed(deadline)) {
				ERROR("%s: fifo timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}

		xfer_byte(data, qspi_base() + QSPI_DR);
		data++;
	}

	return 0;
}
/* Serve a read directly from the memory-mapped window */
static int stm32_qspi_mm(const struct spi_mem_op *op)
{
	const void *src = (const void *)(stm32_qspi.mm_base +
					 (size_t)op->addr.val);

	memcpy(op->data.buf, src, op->data.nbytes);

	return 0;
}
/*
 * Run the data phase of @op, if any.
 *
 * Memory-mapped mode copies straight from the mapped window; other modes
 * go through the FIFO polling path.
 */
static int stm32_qspi_tx(const struct spi_mem_op *op, uint8_t mode)
{
	if (op->data.nbytes == 0U) {
		/* Nothing to transfer */
		return 0;
	}

	return (mode == QSPI_CCR_MEM_MAP) ? stm32_qspi_mm(op) :
					    stm32_qspi_poll(op);
}
/*
 * Translate a bus width (number of IO lines) into the CCR xMODE field
 * encoding: 1 -> 1, 2 -> 2, 4 -> 3. Other values pass through unchanged.
 */
static unsigned int stm32_qspi_get_mode(uint8_t buswidth)
{
	return (buswidth == 4U) ? 3U : buswidth;
}
/*
 * Execute a SPI memory operation on the QSPI controller.
 *
 * The CCR register is built from the command, address, dummy and data
 * phases of @op, then the transfer is run either in memory-mapped mode
 * (reads that fit inside the mapped window) or in indirect mode through
 * the FIFO. On error or after a memory-mapped read, the current transfer
 * is aborted via QSPI_CR_ABORT.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int stm32_qspi_exec_op(const struct spi_mem_op *op)
{
	uint64_t timeout;
	uint32_t ccr;
	size_t addr_max;
	uint8_t mode = QSPI_CCR_IND_WRITE;
	int ret;

	VERBOSE("%s: cmd:%x mode:%d.%d.%d.%d addr:%llx len:%x\n",
		__func__, op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth,
		op->addr.val, op->data.nbytes);

	ret = stm32_qspi_wait_for_not_busy();
	if (ret != 0) {
		return ret;
	}

	/*
	 * Pick the functional mode: memory-mapped when the whole access
	 * fits inside the mapped window and an address phase is present,
	 * indirect read otherwise. Writes keep QSPI_CCR_IND_WRITE.
	 */
	addr_max = op->addr.val + op->data.nbytes + 1U;
	if ((op->data.dir == SPI_MEM_DATA_IN) && (op->data.nbytes != 0U)) {
		if ((addr_max < stm32_qspi.mm_size) &&
		    (op->addr.buswidth != 0U)) {
			mode = QSPI_CCR_MEM_MAP;
		} else {
			mode = QSPI_CCR_IND_READ;
		}
	}

	/* DLR holds the transfer length minus one */
	if (op->data.nbytes != 0U) {
		mmio_write_32(qspi_base() + QSPI_DLR, op->data.nbytes - 1U);
	}

	/* Assemble the communication configuration register */
	ccr = mode << QSPI_CCR_FMODE_SHIFT;
	ccr |= op->cmd.opcode;
	ccr |= stm32_qspi_get_mode(op->cmd.buswidth) << QSPI_CCR_IMODE_SHIFT;

	if (op->addr.nbytes != 0U) {
		ccr |= (op->addr.nbytes - 1U) << QSPI_CCR_ADSIZE_SHIFT;
		ccr |= stm32_qspi_get_mode(op->addr.buswidth) <<
		       QSPI_CCR_ADMODE_SHIFT;
	}

	/* Dummy bytes are expressed in clock cycles in the DCYC field */
	if ((op->dummy.buswidth != 0U) && (op->dummy.nbytes != 0U)) {
		ccr |= (op->dummy.nbytes * 8U / op->dummy.buswidth) <<
		       QSPI_CCR_DCYC_SHIFT;
	}

	if (op->data.nbytes != 0U) {
		ccr |= stm32_qspi_get_mode(op->data.buswidth) <<
		       QSPI_CCR_DMODE_SHIFT;
	}

	mmio_write_32(qspi_base() + QSPI_CCR, ccr);

	/* Writing AR triggers the transfer in indirect modes */
	if ((op->addr.nbytes != 0U) && (mode != QSPI_CCR_MEM_MAP)) {
		mmio_write_32(qspi_base() + QSPI_AR, op->addr.val);
	}

	ret = stm32_qspi_tx(op, mode);

	/*
	 * Abort in:
	 * - Error case.
	 * - Memory mapped read: prefetching must be stopped if we read the last
	 *   byte of device (device size - fifo size). If device size is not
	 *   known then prefetching is always stopped.
	 */
	if ((ret != 0) || (mode == QSPI_CCR_MEM_MAP)) {
		goto abort;
	}

	/* Wait end of TX in indirect mode */
	ret = stm32_qspi_wait_cmd(op);
	if (ret != 0) {
		goto abort;
	}

	return 0;

abort:
	mmio_setbits_32(qspi_base() + QSPI_CR, QSPI_CR_ABORT);

	/* Wait clear of abort bit by hardware */
	timeout = timeout_init_us(QSPI_ABT_TIMEOUT_US);
	while ((mmio_read_32(qspi_base() + QSPI_CR) & QSPI_CR_ABORT) != 0U) {
		if (timeout_elapsed(timeout)) {
			ret = -ETIMEDOUT;
			break;
		}
	}

	mmio_write_32(qspi_base() + QSPI_FCR, QSPI_FCR_CTCF);

	if (ret != 0) {
		ERROR("%s: exec op error\n", __func__);
	}

	return ret;
}
/*
 * Select flash @cs (FSEL bit) and enable the controller.
 *
 * Return: 0 on success, -ENODEV if @cs is out of range.
 */
static int stm32_qspi_claim_bus(unsigned int cs)
{
	uint32_t cr_val = QSPI_CR_EN;

	if (cs >= QSPI_MAX_CHIP) {
		return -ENODEV;
	}

	/* FSEL set selects the second flash, clear selects the first */
	if (cs == 1U) {
		cr_val |= QSPI_CR_FSEL;
	}

	mmio_clrsetbits_32(qspi_base() + QSPI_CR, QSPI_CR_FSEL, cr_val);

	return 0;
}
/* Disable the controller (clear the EN bit) to release the bus */
static void stm32_qspi_release_bus(void)
{
	mmio_clrbits_32(qspi_base() + QSPI_CR, QSPI_CR_EN);
}
/*
 * Program the bus clock prescaler and chip-select high time for @hz.
 *
 * @hz: Requested bus frequency in Hertz; 0 keeps the maximum prescaler.
 * Return: 0 on success, -EINVAL if the kernel clock rate is zero, or the
 * error from the busy wait.
 */
static int stm32_qspi_set_speed(unsigned int hz)
{
	unsigned long qspi_clk = stm32mp_clk_get_rate(stm32_qspi.clock_id);
	uint32_t prescaler = UINT8_MAX;
	uint32_t csht;
	int ret;

	if (qspi_clk == 0U) {
		return -EINVAL;
	}

	if (hz > 0U) {
		/* Bus clock = kernel clock / (prescaler + 1), rounded up
		 * so the result never exceeds the requested frequency;
		 * clamped to the 8-bit PRESCALER field. */
		prescaler = div_round_up(qspi_clk, hz) - 1U;
		if (prescaler > UINT8_MAX) {
			prescaler = UINT8_MAX;
		}
	}

	/* NOTE(review): CSHT derived from 5 bus-clock periods scaled by
	 * FREQ_100MHZ — confirm the unit against the reference manual */
	csht = div_round_up((5U * qspi_clk) / (prescaler + 1U), FREQ_100MHZ);
	csht = ((csht - 1U) << QSPI_DCR_CSHT_SHIFT) & QSPI_DCR_CSHT_MASK;

	ret = stm32_qspi_wait_for_not_busy();
	if (ret != 0) {
		return ret;
	}

	mmio_clrsetbits_32(qspi_base() + QSPI_CR, QSPI_CR_PRESCALER_MASK,
			   prescaler << QSPI_CR_PRESCALER_SHIFT);

	mmio_clrsetbits_32(qspi_base() + QSPI_DCR, QSPI_DCR_CSHT_MASK, csht);

	VERBOSE("%s: speed=%lu\n", __func__, qspi_clk / (prescaler + 1U));

	return 0;
}
/*
 * Apply the SPI mode flags to the controller.
 *
 * Only modes 0 (CPOL=0/CPHA=0) and 3 (CPOL=1/CPHA=1) are supported by the
 * CKMODE bit; active-high chip select is not supported at all.
 *
 * Return: 0 on success, -ENODEV for unsupported modes, or the error from
 * the busy wait.
 */
static int stm32_qspi_set_mode(unsigned int mode)
{
	bool cpol = (mode & SPI_CPOL) != 0U;
	bool cpha = (mode & SPI_CPHA) != 0U;
	int err;

	err = stm32_qspi_wait_for_not_busy();
	if (err != 0) {
		return err;
	}

	/* The controller only drives the chip select active low */
	if ((mode & SPI_CS_HIGH) != 0U) {
		return -ENODEV;
	}

	if (cpol && cpha) {
		mmio_setbits_32(qspi_base() + QSPI_DCR, QSPI_DCR_CKMODE);
	} else if (!cpol && !cpha) {
		mmio_clrbits_32(qspi_base() + QSPI_DCR, QSPI_DCR_CKMODE);
	} else {
		return -ENODEV;
	}

	VERBOSE("%s: mode=0x%x\n", __func__, mode);

	if ((mode & SPI_RX_QUAD) != 0U) {
		VERBOSE("rx: quad\n");
	} else if ((mode & SPI_RX_DUAL) != 0U) {
		VERBOSE("rx: dual\n");
	} else {
		VERBOSE("rx: single\n");
	}

	if ((mode & SPI_TX_QUAD) != 0U) {
		VERBOSE("tx: quad\n");
	} else if ((mode & SPI_TX_DUAL) != 0U) {
		VERBOSE("tx: dual\n");
	} else {
		VERBOSE("tx: single\n");
	}

	return 0;
}
/* Bus operations registered with the SPI-MEM framework */
static const struct spi_bus_ops stm32_qspi_bus_ops = {
	.claim_bus = stm32_qspi_claim_bus,
	.release_bus = stm32_qspi_release_bus,
	.set_speed = stm32_qspi_set_speed,
	.set_mode = stm32_qspi_set_mode,
	.exec_op = stm32_qspi_exec_op,
};
/*
 * Probe and initialize the QSPI controller from the device tree.
 *
 * Reads the "qspi" register window and "qspi_mm" memory-mapped window,
 * configures pinctrl, clock and reset, writes the initial CR/DCR values
 * and registers the controller with the SPI-MEM framework.
 *
 * Return: 0 on success, a negative FDT or driver error code otherwise.
 */
int stm32_qspi_init(void)
{
	size_t size;
	int qspi_node;
	struct dt_node_info info;
	void *fdt = NULL;
	int ret;

	if (fdt_get_address(&fdt) == 0) {
		return -FDT_ERR_NOTFOUND;
	}

	qspi_node = dt_get_node(&info, -1, DT_QSPI_COMPAT);
	if (qspi_node < 0) {
		ERROR("No QSPI ctrl found\n");
		return -FDT_ERR_NOTFOUND;
	}

	if (info.status == DT_DISABLED) {
		return -FDT_ERR_NOTFOUND;
	}

	ret = fdt_get_reg_props_by_name(qspi_node, "qspi",
					&stm32_qspi.reg_base, &size);
	if (ret != 0) {
		return ret;
	}

	ret = fdt_get_reg_props_by_name(qspi_node, "qspi_mm",
					&stm32_qspi.mm_base,
					&stm32_qspi.mm_size);
	if (ret != 0) {
		return ret;
	}

	if (dt_set_pinctrl_config(qspi_node) != 0) {
		return -FDT_ERR_BADVALUE;
	}

	if ((info.clock < 0) || (info.reset < 0)) {
		return -FDT_ERR_BADVALUE;
	}

	stm32_qspi.clock_id = (unsigned long)info.clock;
	stm32_qspi.reset_id = (unsigned int)info.reset;

	stm32mp_clk_enable(stm32_qspi.clock_id);

	/* Put the controller through a reset cycle before configuring it */
	stm32mp_reset_assert(stm32_qspi.reset_id);
	stm32mp_reset_deassert(stm32_qspi.reset_id);

	/* Enable sample shift (SSHIFT); set all FSIZE bits in DCR */
	mmio_write_32(qspi_base() + QSPI_CR, QSPI_CR_SSHIFT);
	mmio_write_32(qspi_base() + QSPI_DCR, QSPI_DCR_FSIZE_MASK);

	return spi_mem_init_slave(fdt, qspi_node, &stm32_qspi_bus_ops);
}
/* Fix: removed the stray semicolon that followed the closing brace */
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) STMicroelectronics 2017 - All Rights Reserved
* Copyright (C) STMicroelectronics 2017-2019 - All Rights Reserved
* Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
*/
#include <dt-bindings/pinctrl/stm32-pinfunc.h>
......@@ -135,6 +135,31 @@
status = "disabled";
};
fmc_pins_a: fmc-0 {
pins1 {
pinmux = <STM32_PINMUX('D', 4, AF12)>, /* FMC_NOE */
<STM32_PINMUX('D', 5, AF12)>, /* FMC_NWE */
<STM32_PINMUX('D', 11, AF12)>, /* FMC_A16_FMC_CLE */
<STM32_PINMUX('D', 12, AF12)>, /* FMC_A17_FMC_ALE */
<STM32_PINMUX('D', 14, AF12)>, /* FMC_D0 */
<STM32_PINMUX('D', 15, AF12)>, /* FMC_D1 */
<STM32_PINMUX('D', 0, AF12)>, /* FMC_D2 */
<STM32_PINMUX('D', 1, AF12)>, /* FMC_D3 */
<STM32_PINMUX('E', 7, AF12)>, /* FMC_D4 */
<STM32_PINMUX('E', 8, AF12)>, /* FMC_D5 */
<STM32_PINMUX('E', 9, AF12)>, /* FMC_D6 */
<STM32_PINMUX('E', 10, AF12)>, /* FMC_D7 */
<STM32_PINMUX('G', 9, AF12)>; /* FMC_NE2_FMC_NCE */
bias-disable;
drive-push-pull;
slew-rate = <1>;
};
pins2 {
pinmux = <STM32_PINMUX('D', 6, AF12)>; /* FMC_NWAIT */
bias-pull-up;
};
};
qspi_bk1_pins_a: qspi-bk1-0 {
pins1 {
pinmux = <STM32_PINMUX('F', 8, AF10)>, /* QSPI_BK1_IO0 */
......
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) STMicroelectronics 2017 - All Rights Reserved
* Copyright (C) STMicroelectronics 2017-2019 - All Rights Reserved
* Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
*/
/dts-v1/;
......@@ -21,21 +21,20 @@
};
&fmc {
pinctrl-names = "default";
pinctrl-0 = <&fmc_pins_a>;
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
nand: nand@0 {
reg = <0>;
nand-on-flash-bbt;
#address-cells = <1>;
#size-cells = <1>;
};
};
&qspi {
pinctrl-names = "default";
pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a &qspi_bk2_pins_a>;
pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
#address-cells = <1>;
#size-cells = <0>;
......@@ -49,15 +48,6 @@
#address-cells = <1>;
#size-cells = <1>;
};
flash1: mx66l51235l@1 {
compatible = "jedec,spi-nor";
reg = <1>;
spi-rx-bus-width = <4>;
spi-max-frequency = <108000000>;
#address-cells = <1>;
#size-cells = <1>;
};
};
&usart3 {
......
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef IO_MTD_H
#define IO_MTD_H
#include <stdint.h>
#include <stdio.h>
#include <drivers/io/io_storage.h>
/* MTD devices ops */
typedef struct io_mtd_ops {
/*
* Initialize MTD framework and retrieve device information.
*
* @size: [out] MTD device size in bytes.
* @erase_size: [out] MTD erase size in bytes.
* Return 0 on success, a negative error code otherwise.
*/
int (*init)(unsigned long long *size, unsigned int *erase_size);
/*
* Execute a read memory operation.
*
* @offset: Offset in bytes to start read operation.
* @buffer: [out] Buffer to store read data.
* @length: Required length to be read in bytes.
* @out_length: [out] Length read in bytes.
* Return 0 on success, a negative error code otherwise.
*/
int (*read)(unsigned int offset, uintptr_t buffer, size_t length,
size_t *out_length);
/*
* Execute a write memory operation.
*
* @offset: Offset in bytes to start write operation.
* @buffer: Buffer to be written in device.
* @length: Required length to be written in bytes.
* Return 0 on success, a negative error code otherwise.
*/
int (*write)(unsigned int offset, uintptr_t buffer, size_t length);
} io_mtd_ops_t;
/* MTD device specification handed to the IO layer */
typedef struct io_mtd_dev_spec {
	unsigned long long device_size;	/* Device size in bytes */
	unsigned int erase_size;	/* Erase size in bytes */
	io_mtd_ops_t ops;		/* Device operations (init/read/write) */
} io_mtd_dev_spec_t;
struct io_dev_connector;
int register_io_dev_mtd(const struct io_dev_connector **dev_con);
#endif /* IO_MTD_H */
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -22,6 +22,7 @@ typedef enum {
IO_TYPE_DUMMY,
IO_TYPE_FIRMWARE_IMAGE_PACKAGE,
IO_TYPE_BLOCK,
IO_TYPE_MTD,
IO_TYPE_MMC,
IO_TYPE_STM32IMAGE,
IO_TYPE_MAX
......
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef DRIVERS_NAND_H
#define DRIVERS_NAND_H
#include <stddef.h>
#include <stdint.h>
#include <lib/utils_def.h>
#define PSEC_TO_MSEC(x) div_round_up((x), 1000000000ULL)
/* ECC configuration of a NAND device */
struct ecc {
	unsigned int mode; /* ECC mode NAND_ECC_{NONE|HW|ONDIE} */
	unsigned int size; /* Data byte per ECC step */
	unsigned int bytes; /* ECC bytes per step */
	unsigned int max_bit_corr; /* Max correctable bits per ECC step */
};
/* Generic NAND device description shared by raw and SPI NAND backends */
struct nand_device {
	unsigned int block_size;	/* Block size in bytes */
	unsigned int page_size;		/* Page size in bytes */
	unsigned long long size;	/* Total device size in bytes */
	unsigned int nb_planes;		/* Number of planes */
	unsigned int buswidth;		/* NAND_BUS_WIDTH_{8|16} */
	struct ecc ecc;			/* ECC configuration */
	/* NOTE(review): presumably returns non-zero when @block is bad,
	 * 0 when good, negative on error — confirm with implementations */
	int (*mtd_block_is_bad)(unsigned int block);
	/* Read one page into @buffer; 0 on success, negative on error */
	int (*mtd_read_page)(struct nand_device *nand, unsigned int page,
			     uintptr_t buffer);
};
/*
* Read bytes from NAND device
*
* @offset: Byte offset to read from in device
* @buffer: [out] Bytes read from device
* @length: Number of bytes to read
* @length_read: [out] Number of bytes read from device
* Return: 0 on success, a negative errno on failure
*/
int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
size_t *length_read);
/*
* Get NAND device instance
*
* Return: NAND device instance reference
*/
struct nand_device *get_nand_device(void);
#endif /* DRIVERS_NAND_H */
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef DRIVERS_RAW_NAND_H
#define DRIVERS_RAW_NAND_H
#include <stdint.h>
#include <drivers/nand.h>
/* NAND ONFI default value mode 0 in picosecond */
#define NAND_TADL_MIN 400000UL
#define NAND_TALH_MIN 20000UL
#define NAND_TALS_MIN 50000UL
#define NAND_TAR_MIN 25000UL
#define NAND_TCCS_MIN 500000UL
#define NAND_TCEA_MIN 100000UL
#define NAND_TCEH_MIN 20000UL
#define NAND_TCH_MIN 20000UL
#define NAND_TCHZ_MAX 100000UL
#define NAND_TCLH_MIN 20000UL
#define NAND_TCLR_MIN 20000UL
#define NAND_TCLS_MIN 50000UL
#define NAND_TCOH_MIN 0UL
#define NAND_TCS_MIN 70000UL
#define NAND_TDH_MIN 20000UL
#define NAND_TDS_MIN 40000UL
#define NAND_TFEAT_MAX 1000000UL
#define NAND_TIR_MIN 10000UL
#define NAND_TITC_MIN 1000000UL
#define NAND_TR_MAX 200000000UL
#define NAND_TRC_MIN 100000UL
#define NAND_TREA_MAX 40000UL
#define NAND_TREH_MIN 30000UL
#define NAND_TRHOH_MIN 0UL
#define NAND_TRHW_MIN 200000UL
#define NAND_TRHZ_MAX 200000UL
#define NAND_TRLOH_MIN 0UL
#define NAND_TRP_MIN 50000UL
#define NAND_TRR_MIN 40000UL
#define NAND_TRST_MAX 250000000000ULL
#define NAND_TWB_MAX 200000UL
#define NAND_TWC_MIN 100000UL
#define NAND_TWH_MIN 30000UL
#define NAND_TWHR_MIN 120000UL
#define NAND_TWP_MIN 50000UL
#define NAND_TWW_MIN 100000UL
/* NAND request types */
#define NAND_REQ_CMD 0x0000U
#define NAND_REQ_ADDR 0x1000U
#define NAND_REQ_DATAIN 0x2000U
#define NAND_REQ_DATAOUT 0x3000U
#define NAND_REQ_WAIT 0x4000U
#define NAND_REQ_MASK GENMASK(14, 12)
#define NAND_REQ_BUS_WIDTH_8 BIT(15)
#define PARAM_PAGE_SIZE 256
/* NAND ONFI commands */
#define NAND_CMD_READ_1ST 0x00U
#define NAND_CMD_CHANGE_1ST 0x05U
#define NAND_CMD_READID_SIG_ADDR 0x20U
#define NAND_CMD_READ_2ND 0x30U
#define NAND_CMD_STATUS 0x70U
#define NAND_CMD_READID 0x90U
#define NAND_CMD_CHANGE_2ND 0xE0U
#define NAND_CMD_READ_PARAM_PAGE 0xECU
#define NAND_CMD_RESET 0xFFU
#define ONFI_REV_21 BIT(3)
#define ONFI_FEAT_BUS_WIDTH_16 BIT(0)
#define ONFI_FEAT_EXTENDED_PARAM BIT(7)
/* NAND ECC type */
#define NAND_ECC_NONE U(0)
#define NAND_ECC_HW U(1)
#define NAND_ECC_ONDIE U(2)
/* NAND bus width */
#define NAND_BUS_WIDTH_8 U(0)
#define NAND_BUS_WIDTH_16 U(1)
/* Low-level request passed to the NAND controller driver */
struct nand_req {
	struct nand_device *nand;	/* Target NAND device */
	uint16_t type;			/* NAND_REQ_* type and flags */
	uint8_t *addr;			/* Command/address/data bytes */
	unsigned int length;		/* Transfer length in bytes */
	unsigned int delay_ms;		/* Wait delay in milliseconds */
	/* NOTE(review): presumably a delay applied after the instruction —
	 * confirm against the controller driver */
	unsigned int inst_delay;
};
struct nand_param_page {
/* Rev information and feature block */
uint32_t page_sig;
uint16_t rev;
uint16_t features;
uint16_t opt_cmd;
uint8_t jtg;
uint8_t train_cmd;
uint16_t ext_param_length;
uint8_t nb_param_pages;
uint8_t reserved1[17];
/* Manufacturer information */
uint8_t manufacturer[12];
uint8_t model[20];
uint8_t manufacturer_id;
uint16_t data_code;
uint8_t reserved2[13];
/* Memory organization */
uint32_t bytes_per_page;
uint16_t spare_per_page;
uint32_t bytes_per_partial;
uint16_t spare_per_partial;
uint32_t num_pages_per_blk;
uint32_t num_blk_in_lun;
uint8_t num_lun;
uint8_t num_addr_cycles;
uint8_t bit_per_cell;
uint16_t max_bb_per_lun;
uint16_t blk_endur;
uint8_t valid_blk_begin;
uint16_t blk_enbur_valid;
uint8_t nb_prog_page;
uint8_t partial_prog_attr;
uint8_t nb_ecc_bits;
uint8_t plane_addr;
uint8_t mplanes_ops;
uint8_t ez_nand;
uint8_t reserved3[12];
/* Electrical parameters */
uint8_t io_pin_cap_max;
uint16_t sdr_timing_mode;
uint16_t sdr_prog_cache_timing;
uint16_t tprog;
uint16_t tbers;
uint16_t tr;
uint16_t tccs;
uint8_t nvddr_timing_mode;
uint8_t nvddr2_timing_mode;
uint8_t nvddr_features;
uint16_t clk_input_cap_typ;
uint16_t io_pin_cap_typ;
uint16_t input_pin_cap_typ;
uint8_t input_pin_cap_max;
uint8_t drv_strength_support;
uint16_t tr_max;
uint16_t tadl;
uint16_t tr_typ;
uint8_t reserved4[6];
/* Vendor block */
uint16_t vendor_revision;
uint8_t vendor[88];
uint16_t crc16;
} __packed;
struct nand_ctrl_ops {
int (*exec)(struct nand_req *req);
void (*setup)(struct nand_device *nand);
};
struct rawnand_device {
struct nand_device *nand_dev;
const struct nand_ctrl_ops *ops;
};
int nand_raw_init(unsigned long long *size, unsigned int *erase_size);
int nand_wait_ready(unsigned long delay);
int nand_read_page_cmd(unsigned int page, unsigned int offset,
uintptr_t buffer, unsigned int len);
int nand_change_read_column_cmd(unsigned int offset, uintptr_t buffer,
unsigned int len);
void nand_raw_ctrl_init(const struct nand_ctrl_ops *ops);
/*
* Platform can implement this to override default raw NAND instance
* configuration.
*
* @device: target raw NAND instance.
* Return 0 on success, negative value otherwise.
*/
int plat_get_raw_nand_data(struct rawnand_device *device);
#endif /* DRIVERS_RAW_NAND_H */
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef DRIVERS_SPI_MEM_H
#define DRIVERS_SPI_MEM_H
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#define SPI_MEM_BUSWIDTH_1_LINE 1U
#define SPI_MEM_BUSWIDTH_2_LINE 2U
#define SPI_MEM_BUSWIDTH_4_LINE 4U
/*
* enum spi_mem_data_dir - Describes the direction of a SPI memory data
* transfer from the controller perspective.
* @SPI_MEM_DATA_IN: data coming from the SPI memory.
* @SPI_MEM_DATA_OUT: data sent to the SPI memory.
*/
enum spi_mem_data_dir {
SPI_MEM_DATA_IN,
SPI_MEM_DATA_OUT,
};
/*
* struct spi_mem_op - Describes a SPI memory operation.
*
* @cmd.buswidth: Number of IO lines used to transmit the command.
* @cmd.opcode: Operation opcode.
* @addr.nbytes: Number of address bytes to send. Can be zero if the operation
* does not need to send an address.
* @addr.buswidth: Number of IO lines used to transmit the address.
* @addr.val: Address value. This value is always sent MSB first on the bus.
* Note that only @addr.nbytes are taken into account in this
* address value, so users should make sure the value fits in the
* assigned number of bytes.
* @dummy.nbytes: Number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes.
* @dummy.buswidth: Number of IO lines used to transmit the dummy bytes.
* @data.buswidth: Number of IO lines used to send/receive the data.
* @data.dir: Direction of the transfer.
* @data.nbytes: Number of data bytes to transfer.
* @data.buf: Input or output data buffer depending on data::dir.
*/
struct spi_mem_op {
struct {
uint8_t buswidth;
uint8_t opcode;
} cmd;
struct {
uint8_t nbytes;
uint8_t buswidth;
uint64_t val;
} addr;
struct {
uint8_t nbytes;
uint8_t buswidth;
} dummy;
struct {
uint8_t buswidth;
enum spi_mem_data_dir dir;
unsigned int nbytes;
void *buf;
} data;
};
/* SPI mode flags */
#define SPI_CPHA BIT(0) /* clock phase */
#define SPI_CPOL BIT(1) /* clock polarity */
#define SPI_CS_HIGH BIT(2) /* CS active high */
#define SPI_LSB_FIRST BIT(3) /* per-word bits-on-wire */
#define SPI_3WIRE BIT(4) /* SI/SO signals shared */
#define SPI_PREAMBLE BIT(5) /* Skip preamble bytes */
#define SPI_TX_DUAL BIT(6) /* transmit with 2 wires */
#define SPI_TX_QUAD BIT(7) /* transmit with 4 wires */
#define SPI_RX_DUAL BIT(8) /* receive with 2 wires */
#define SPI_RX_QUAD BIT(9) /* receive with 4 wires */
struct spi_bus_ops {
/*
* Claim the bus and prepare it for communication.
*
* @cs: The chip select.
* Returns: 0 if the bus was claimed successfully, or a negative value
* if it wasn't.
*/
int (*claim_bus)(unsigned int cs);
/*
* Release the SPI bus.
*/
void (*release_bus)(void);
/*
* Set transfer speed.
*
* @hz: The transfer speed in Hertz.
* Returns: 0 on success, a negative error code otherwise.
*/
int (*set_speed)(unsigned int hz);
/*
* Set the SPI mode/flags.
*
* @mode: Requested SPI mode (SPI_... flags).
* Returns: 0 on success, a negative error code otherwise.
*/
int (*set_mode)(unsigned int mode);
/*
* Execute a SPI memory operation.
*
* @op: The memory operation to execute.
* Returns: 0 on success, a negative error code otherwise.
*/
int (*exec_op)(const struct spi_mem_op *op);
};
int spi_mem_exec_op(const struct spi_mem_op *op);
int spi_mem_init_slave(void *fdt, int bus_node,
const struct spi_bus_ops *ops);
#endif /* DRIVERS_SPI_MEM_H */
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef DRIVERS_SPI_NAND_H
#define DRIVERS_SPI_NAND_H
#include <drivers/nand.h>
#include <drivers/spi_mem.h>
#define SPI_NAND_OP_GET_FEATURE 0x0FU
#define SPI_NAND_OP_SET_FEATURE 0x1FU
#define SPI_NAND_OP_READ_ID 0x9FU
#define SPI_NAND_OP_LOAD_PAGE 0x13U
#define SPI_NAND_OP_RESET 0xFFU
#define SPI_NAND_OP_READ_FROM_CACHE 0x03U
#define SPI_NAND_OP_READ_FROM_CACHE_2X 0x3BU
#define SPI_NAND_OP_READ_FROM_CACHE_4X 0x6BU
/* Configuration register */
#define SPI_NAND_REG_CFG 0xB0U
#define SPI_NAND_CFG_ECC_EN BIT(4)
#define SPI_NAND_CFG_QE BIT(0)
/* Status register */
#define SPI_NAND_REG_STATUS 0xC0U
#define SPI_NAND_STATUS_BUSY BIT(0)
#define SPI_NAND_STATUS_ECC_UNCOR BIT(5)
/* SPI-NAND device instance */
struct spinand_device {
	struct nand_device *nand_dev;		/* Generic NAND description */
	struct spi_mem_op spi_read_cache_op;	/* SPI operation for cache reads */
	uint8_t cfg_cache; /* Cached value of SPI NAND device register CFG */
};
int spi_nand_init(unsigned long long *size, unsigned int *erase_size);
/*
* Platform can implement this to override default SPI-NAND instance
* configuration.
*
* @device: target SPI-NAND instance.
* Return 0 on success, negative value otherwise.
*/
int plat_get_spi_nand_data(struct spinand_device *device);
#endif /* DRIVERS_SPI_NAND_H */
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef DRIVERS_SPI_NOR_H
#define DRIVERS_SPI_NOR_H
#include <drivers/spi_mem.h>
/* OPCODE */
#define SPI_NOR_OP_WREN 0x06U /* Write enable */
#define SPI_NOR_OP_WRSR 0x01U /* Write status register 1 byte */
#define SPI_NOR_OP_READ_ID 0x9FU /* Read JEDEC ID */
#define SPI_NOR_OP_READ_CR 0x35U /* Read configuration register */
#define SPI_NOR_OP_READ_SR 0x05U /* Read status register */
#define SPI_NOR_OP_READ_FSR 0x70U /* Read flag status register */
#define SPINOR_OP_RDEAR 0xC8U /* Read Extended Address Register */
#define SPINOR_OP_WREAR 0xC5U /* Write Extended Address Register */
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17U /* Bank register write */
#define SPINOR_OP_BRRD 0x16U /* Bank register read */
#define SPI_NOR_OP_READ 0x03U /* Read data bytes (low frequency) */
#define SPI_NOR_OP_READ_FAST 0x0BU /* Read data bytes (high frequency) */
#define SPI_NOR_OP_READ_1_1_2 0x3BU /* Read data bytes (Dual Output SPI) */
#define SPI_NOR_OP_READ_1_2_2 0xBBU /* Read data bytes (Dual I/O SPI) */
#define SPI_NOR_OP_READ_1_1_4 0x6BU /* Read data bytes (Quad Output SPI) */
#define SPI_NOR_OP_READ_1_4_4 0xEBU /* Read data bytes (Quad I/O SPI) */
/* Flags for NOR specific configuration */
#define SPI_NOR_USE_FSR BIT(0)
#define SPI_NOR_USE_BANK BIT(1)
/* SPI-NOR device instance */
struct nor_device {
	struct spi_mem_op read_op;	/* SPI operation used for reads */
	uint32_t size;			/* Device size in bytes */
	uint32_t flags;			/* SPI_NOR_USE_{FSR|BANK} flags */
	uint8_t selected_bank;		/* Currently selected bank */
	uint8_t bank_write_cmd;		/* Opcode to write the bank/EAR register */
	uint8_t bank_read_cmd;		/* Opcode to read the bank/EAR register */
};
int spi_nor_read(unsigned int offset, uintptr_t buffer, size_t length,
size_t *length_read);
int spi_nor_init(unsigned long long *device_size, unsigned int *erase_size);
/*
* Platform can implement this to override default NOR instance configuration.
*
* @device: target NOR instance.
* Return 0 on success, negative value otherwise.
*/
int plat_get_nor_data(struct nor_device *device);
#endif /* DRIVERS_SPI_NOR_H */
......@@ -23,7 +23,7 @@ struct stm32image_part_info {
struct stm32image_device_info {
struct stm32image_part_info part_info[STM32_PART_NUM];
uint32_t device_size;
unsigned long long device_size;
uint32_t lba_size;
};
......
/*
* Copyright (c) 2019, STMicroelectronics - All Rights Reserved
*
* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
*/
#ifndef STM32_FMC2_NAND_H
#define STM32_FMC2_NAND_H
int stm32_fmc2_init(void);
#endif /* STM32_FMC2_NAND_H */
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment