Commit b514ee86 authored by Soby Mathew's avatar Soby Mathew Committed by TrustedFirmware Code Review
Browse files

Merge "intel: Adds support for Agilex platform" into integration

parents 59e3df6e 2f11d548
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <errno.h>
#include <lib/mmio.h>
#include "agilex_clock_manager.h"
#include "agilex_handoff.h"
/*
 * Fixed reference-clock frequencies (in Hz) used when deriving clocks
 * from the main PLL source selection (see get_wdt_clk()).
 */
static const CLOCK_SOURCE_CONFIG clk_source = {
/* clk_freq_of_eosc1 */
(uint32_t) 25000000,
/* clk_freq_of_f2h_free */
(uint32_t) 400000000,
/* clk_freq_of_cb_intosc_ls */
(uint32_t) 50000000,
};
/*
 * Spin until both the main and peripheral PLLs report lock in the
 * clock manager status register, giving up after 1000 reads.
 *
 * Returns 0 on lock, -ETIMEDOUT (converted to uint32_t) on timeout.
 */
uint32_t wait_pll_lock(void)
{
	uint32_t stat;
	uint32_t polls = 0;

	for (;;) {
		stat = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
		/* Timeout is checked before the lock bits, as in the
		 * original polling loop. */
		if (++polls >= 1000)
			return -ETIMEDOUT;
		if ((CLKMGR_STAT_MAINPLLLOCKED(stat) != 0) &&
		    (CLKMGR_STAT_PERPLLLOCKED(stat) != 0))
			return 0;
	}
}
/*
 * Spin until the clock manager state machine leaves the busy state,
 * giving up after 1000 reads.
 *
 * Returns 0 when idle, -ETIMEDOUT (converted to uint32_t) on timeout.
 */
uint32_t wait_fsm(void)
{
	uint32_t stat;
	uint32_t polls = 0;

	for (;;) {
		stat = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
		/* Timeout is checked before the busy bit, as in the
		 * original polling loop. */
		if (++polls >= 1000)
			return -ETIMEDOUT;
		if (CLKMGR_STAT_BUSY(stat) != CLKMGR_STAT_BUSY_E_BUSY)
			return 0;
	}
}
/*
 * Issue a write request on the PLL memory interface at pll_mem_offset
 * and wait for the hardware to acknowledge it (REQ bit self-clears).
 *
 * Returns 0 on success, -ETIMEDOUT (converted to uint32_t) when the
 * request is still pending after the polling limit.
 */
uint32_t pll_source_sync_config(uint32_t pll_mem_offset)
{
	uint32_t val;
	uint32_t count = 0;
	uint32_t req_status;

	val = (CLKMGR_MEM_WR | CLKMGR_MEM_REQ |
		CLKMGR_MEM_WDAT << CLKMGR_MEM_WDAT_OFFSET | CLKMGR_MEM_ADDR);
	mmio_write_32(pll_mem_offset, val);

	do {
		req_status = mmio_read_32(pll_mem_offset);
		count++;
	} while ((req_status & CLKMGR_MEM_REQ) && (count < 10));

	/*
	 * BUG FIX: the timeout test used `count >= 100`, which is
	 * unreachable because the loop above caps count at 10, so a
	 * stuck request was silently reported as success.  Test the
	 * actual failure condition instead: REQ still pending.
	 */
	if (req_status & CLKMGR_MEM_REQ)
		return -ETIMEDOUT;

	return 0;
}
/*
 * Issue a read request on the PLL memory interface at pll_mem_offset,
 * wait for the acknowledge (REQ bit self-clears), then fetch and log
 * the read data word at offset +0x4.
 *
 * Returns 0 on success, -ETIMEDOUT (converted to uint32_t) when the
 * request is still pending after the polling limit.
 */
uint32_t pll_source_sync_read(uint32_t pll_mem_offset)
{
	uint32_t val;
	uint32_t rdata;
	uint32_t count = 0;
	uint32_t req_status;

	val = (CLKMGR_MEM_REQ | CLKMGR_MEM_ADDR);
	mmio_write_32(pll_mem_offset, val);

	do {
		req_status = mmio_read_32(pll_mem_offset);
		count++;
	} while ((req_status & CLKMGR_MEM_REQ) && (count < 10));

	/*
	 * BUG FIX: the timeout test used `count >= 100`, which is
	 * unreachable because the loop above caps count at 10, so a
	 * stuck request was silently reported as success.  Test the
	 * actual failure condition instead: REQ still pending.
	 */
	if (req_status & CLKMGR_MEM_REQ)
		return -ETIMEDOUT;

	rdata = mmio_read_32(pll_mem_offset + 0x4);
	INFO("rdata (%x) = %x\n", pll_mem_offset + 0x4, rdata);

	return 0;
}
/*
 * Program the entire clock manager from boot-loader handoff data.
 *
 * Sequence: bypass all clocks, reset/power-down both PLLs, program
 * dividers and VCO calibration from the handoff values, power the PLLs
 * back up, sync the PLL memory interface, program the ping-pong
 * counters, then remove bypass and clear any loss-of-lock interrupts.
 * The order of the register writes below is hardware-mandated; do not
 * reorder.
 *
 * NOTE(review): the return values of wait_fsm()/wait_pll_lock() and
 * the pll_source_sync_*() helpers are ignored here, so a PLL that
 * never locks is not reported to the caller — confirm this is
 * intentional.
 */
void config_clkmgr_handoff(handoff *hoff_ptr)
{
uint32_t mdiv, mscnt, hscnt;
uint32_t arefclk_div, drefclk_div;
/* Bypass all mainpllgrp's clocks */
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0x7);
wait_fsm();
/* Bypass all perpllgrp's clocks */
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0x7f);
wait_fsm();
/* Put both PLL in reset and power down */
mmio_clrbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK |
CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK);
mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK |
CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK);
/* Setup main PLL dividers */
mdiv = CLKMGR_MAINPLL_PLLM_MDIV(hoff_ptr->main_pll_pllm);
arefclk_div = CLKMGR_MAINPLL_PLLGLOB_AREFCLKDIV(
hoff_ptr->main_pll_pllglob);
drefclk_div = CLKMGR_MAINPLL_PLLGLOB_DREFCLKDIV(
hoff_ptr->main_pll_pllglob);
/* VCO calibration counters derived from the dividers; mscnt is
 * clamped to at least 1 to avoid a zero count (and a divide by
 * zero in the hscnt expression). */
mscnt = 100 / (mdiv / BIT(drefclk_div));
if (!mscnt)
mscnt = 1;
hscnt = (mdiv * mscnt * BIT(drefclk_div) / arefclk_div) - 4;
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_VCOCALIB,
CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(hscnt) |
CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(mscnt));
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV,
hoff_ptr->main_pll_nocdiv);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
hoff_ptr->main_pll_pllglob);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_FDBCK,
hoff_ptr->main_pll_fdbck);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC0,
hoff_ptr->main_pll_pllc0);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC1,
hoff_ptr->main_pll_pllc1);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC2,
hoff_ptr->main_pll_pllc2);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC3,
hoff_ptr->main_pll_pllc3);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM,
hoff_ptr->main_pll_pllm);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MPUCLK,
hoff_ptr->main_pll_mpuclk);
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCCLK,
hoff_ptr->main_pll_nocclk);
/* Setup peripheral PLL dividers */
mdiv = CLKMGR_PERPLL_PLLM_MDIV(hoff_ptr->per_pll_pllm);
arefclk_div = CLKMGR_PERPLL_PLLGLOB_AREFCLKDIV(
hoff_ptr->per_pll_pllglob);
drefclk_div = CLKMGR_PERPLL_PLLGLOB_DREFCLKDIV(
hoff_ptr->per_pll_pllglob);
/* Same VCO calibration derivation as the main PLL above. */
mscnt = 100 / (mdiv / BIT(drefclk_div));
if (!mscnt)
mscnt = 1;
hscnt = (mdiv * mscnt * BIT(drefclk_div) / arefclk_div) - 4;
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_VCOCALIB,
CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(hscnt) |
CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(mscnt));
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EMACCTL,
hoff_ptr->per_pll_emacctl);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_GPIODIV,
CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(
hoff_ptr->per_pll_gpiodiv));
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
hoff_ptr->per_pll_pllglob);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_FDBCK,
hoff_ptr->per_pll_fdbck);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC0,
hoff_ptr->per_pll_pllc0);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC1,
hoff_ptr->per_pll_pllc1);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC2,
hoff_ptr->per_pll_pllc2);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC3,
hoff_ptr->per_pll_pllc3);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM,
hoff_ptr->per_pll_pllm);
/* Take both PLL out of reset and power up */
mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK |
CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK);
mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK |
CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK);
wait_pll_lock();
/* Sync the PLL memory interface for both PLL groups. */
pll_source_sync_config(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MEM);
pll_source_sync_read(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MEM);
pll_source_sync_config(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM);
pll_source_sync_read(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM);
/*Configure Ping Pong counters in altera group */
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACACTR,
hoff_ptr->alt_emacactr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACBCTR,
hoff_ptr->alt_emacbctr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACPTPCTR,
hoff_ptr->alt_emacptpctr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_GPIODBCTR,
hoff_ptr->alt_gpiodbctr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR,
hoff_ptr->alt_sdmmcctr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER0CTR,
hoff_ptr->alt_s2fuser0ctr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER1CTR,
hoff_ptr->alt_s2fuser1ctr);
mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_PSIREFCTR,
hoff_ptr->alt_psirefctr);
/* Take all PLLs out of bypass */
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0);
wait_fsm();
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0);
wait_fsm();
/* Clear loss lock interrupt status register that */
/* might be set during configuration */
mmio_setbits_32(CLKMGR_OFFSET + CLKMGR_INTRCLR,
CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK |
CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK);
/* Take all ping pong counters out of reset */
mmio_clrbits_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EXTCNTRST,
CLKMGR_ALTERA_EXTCNTRST_RESET);
/* Set safe mode / out of boot mode */
mmio_clrbits_32(CLKMGR_OFFSET + CLKMGR_CTRL,
CLKMGR_CTRL_BOOTMODE_SET_MSK);
wait_fsm();
/* Enable mainpllgrp's software-managed clock */
mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_EN,
CLKMGR_MAINPLL_EN_RESET);
mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
CLKMGR_PERPLL_EN_RESET);
}
/*
 * Derive the l4_sys_free clock frequency (used by the watchdogs) from
 * the live main-PLL PLLGLOB register and the handoff divider values.
 * l4_sys_free is l3_main_free / 4.
 */
int get_wdt_clk(handoff *hoff_ptr)
{
	int noc_base_clk, l3_free_clk;
	int pllglob, mdiv, arefclkdiv, ref_clk;

	pllglob = mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB);

	/* Pick the reference frequency selected by the PLL source field. */
	switch (CLKMGR_MAINPLL_PLLGLOB_PSRC(pllglob)) {
	case CLKMGR_MAINPLL_PLLGLOB_PSRC_EOSC1:
		ref_clk = clk_source.clk_freq_of_eosc1;
		break;
	case CLKMGR_MAINPLL_PLLGLOB_PSRC_INTOSC:
		ref_clk = clk_source.clk_freq_of_cb_intosc_ls;
		break;
	case CLKMGR_MAINPLL_PLLGLOB_PSRC_F2S:
		ref_clk = clk_source.clk_freq_of_f2h_free;
		break;
	default:
		ref_clk = 0;
		assert(0);
		break;
	}

	/* VCO frequency = (ref / arefclkdiv) * mdiv */
	arefclkdiv = CLKMGR_MAINPLL_PLLGLOB_AREFCLKDIV(pllglob);
	mdiv = CLKMGR_MAINPLL_PLLM_MDIV(hoff_ptr->main_pll_pllm);
	ref_clk = (ref_clk / arefclkdiv) * mdiv;

	noc_base_clk = ref_clk / (hoff_ptr->main_pll_pllc1 & 0x7ff);
	l3_free_clk = noc_base_clk / (hoff_ptr->main_pll_nocclk + 1);

	return l3_free_clk / 4;
}
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <platform_def.h>
#include <string.h>
#include "agilex_handoff.h"
/* Byte-swap a 32-bit value (big endian <-> little endian).
 * Function-like macro: each argument is evaluated four times, so pass
 * only side-effect-free expressions. */
#define SWAP_UINT32(x) (((x) >> 24) | (((x) & 0x00FF0000) >> 8) | \
(((x) & 0x0000FF00) << 8) | ((x) << 24))
/*
 * Copy the boot-loader handoff block from PLAT_HANDOFF_OFFSET into
 * *reverse_hoff_ptr, byte-swapping every 32-bit word from big endian
 * to the CPU's little-endian layout, then validate the magic numbers
 * of each sub-section.
 *
 * Returns 0 on success, -1 when any magic check fails.
 */
int agilex_get_handoff(handoff *reverse_hoff_ptr)
{
	size_t i;
	uint32_t *buffer;
	handoff *handoff_ptr = (handoff *) PLAT_HANDOFF_OFFSET;

	memcpy(reverse_hoff_ptr, handoff_ptr, sizeof(handoff));
	buffer = (uint32_t *)reverse_hoff_ptr;

	/*
	 * Convert big endian to little endian word by word.  size_t
	 * index avoids the signed/unsigned comparison the original
	 * `int i` produced against the sizeof-derived bound.
	 */
	for (i = 0; i < sizeof(handoff) / sizeof(uint32_t); i++)
		buffer[i] = SWAP_UINT32(buffer[i]);

	if (reverse_hoff_ptr->header_magic != HANDOFF_MAGIC_HEADER)
		return -1;
	if (reverse_hoff_ptr->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL)
		return -1;
	if (reverse_hoff_ptr->pinmux_io_magic != HANDOFF_MAGIC_IOCTLR)
		return -1;
	if (reverse_hoff_ptr->pinmux_fpga_magic != HANDOFF_MAGIC_FPGA)
		return -1;
	if (reverse_hoff_ptr->pinmux_delay_magic != HANDOFF_MAGIC_IODELAY)
		return -1;

	return 0;
}
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stddef.h>

#include <common/debug.h>
#include <lib/mmio.h>

#include "agilex_mailbox.h"
/*
 * Push a command header plus its argument words into the SDM mailbox
 * command ring buffer, then publish the new producer index (CIN).
 *
 * Returns 0 on success, MBOX_INSUFFICIENT_BUFFER when the current CIN
 * value is outside the ring.
 */
static int fill_mailbox_circular_buffer(uint32_t header_cmd, uint32_t *args,
int len)
{
	int idx;
	uint32_t in;

	in = mmio_read_32(MBOX_OFFSET + MBOX_CIN);
	if (in >= MBOX_CMD_BUFFER_SIZE) {
		INFO("Insufficient buffer in mailbox\n");
		return MBOX_INSUFFICIENT_BUFFER;
	}

	/* Header word first, then each argument, wrapping the index
	 * around the ring after every write. */
	mmio_write_32(MBOX_OFFSET + MBOX_CMD_BUFFER + (in * 4), header_cmd);
	in = (in + 1) % MBOX_CMD_BUFFER_SIZE;

	for (idx = 0; idx < len; idx++) {
		mmio_write_32(MBOX_OFFSET + MBOX_CMD_BUFFER + (in * 4),
				args[idx]);
		in = (in + 1) % MBOX_CMD_BUFFER_SIZE;
	}

	/* Publish the producer index so the SDM picks up the command. */
	mmio_write_32(MBOX_OFFSET + MBOX_CIN, in);
	return 0;
}
/*
 * Ring the SDM doorbell and read back one response for `job_id` from
 * the mailbox response ring buffer.
 *
 * If `response` is non-NULL the payload words are copied into it (the
 * caller must provide enough space for MBOX_RESP_LEN words).
 *
 * Returns the number of payload words read, MBOX_WRONG_ID if the first
 * response header belongs to another client/job, the negated raw
 * header word on an SDM-reported error, or MBOX_NO_RESPONSE when the
 * doorbell never rings or the ring is empty.
 */
int mailbox_read_response(int job_id, uint32_t *response)
{
int rin = 0;
int rout = 0;
int response_length = 0;
int resp = 0;
int total_resp_len = 0;
int timeout = 100000;
/* Notify the SDM, then busy-wait for its doorbell back to us. */
mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1);
while (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) != 1) {
if (timeout-- < 0)
return MBOX_NO_RESPONSE;
}
/* Acknowledge the doorbell before draining the ring. */
mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0);
rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
while (rout != rin) {
/* Consume the response header word and advance/publish the
 * consumer index (ROUT) immediately. */
resp = mmio_read_32(MBOX_OFFSET +
MBOX_RESP_BUFFER + ((rout++)*4));
rout %= MBOX_RESP_BUFFER_SIZE;
mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
/* NOTE(review): unlike mailbox_poll_response(), a header for
 * another client/job aborts here instead of being skipped —
 * confirm this asymmetry is intended. */
if (MBOX_RESP_CLIENT_ID(resp) != MBOX_ATF_CLIENT_ID ||
MBOX_RESP_JOB_ID(resp) != job_id) {
return MBOX_WRONG_ID;
}
if (MBOX_RESP_ERR(resp) > 0) {
INFO("Error in response: %x\n", resp);
return -resp;
}
/* Copy the payload words that follow the header. */
response_length = MBOX_RESP_LEN(resp);
while (response_length) {
response_length--;
resp = mmio_read_32(MBOX_OFFSET +
MBOX_RESP_BUFFER +
(rout)*4);
if (response) {
*(response + total_resp_len) = resp;
total_resp_len++;
}
rout++;
rout %= MBOX_RESP_BUFFER_SIZE;
mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
}
return total_resp_len;
}
return MBOX_NO_RESPONSE;
}
/*
 * Ring the SDM doorbell and poll until a response for `job_id`
 * arrives, skipping responses that belong to other clients/jobs.
 *
 * When bit 0 of `urgent` is set, only the urgent-ack status bit is
 * checked and no ring-buffer response is consumed.  If `response` is
 * non-NULL the payload words are copied into it.
 *
 * Returns the number of payload words read (or 0 for a successful
 * urgent ack), MBOX_TIMEOUT when the doorbell never rings, the negated
 * error field on an SDM-reported error, or -1 on a failed urgent ack.
 *
 * NOTE(review): `timeout` is not reset between outer-loop iterations,
 * so the budget is cumulative across doorbell waits — confirm this is
 * the intended overall timeout semantics.
 */
int mailbox_poll_response(int job_id, int urgent, uint32_t *response)
{
int timeout = 80000;
int rin = 0;
int rout = 0;
int response_length = 0;
int resp = 0;
int total_resp_len = 0;
mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1);
while (1) {
/* Wait for the SDM doorbell within the remaining budget. */
while (timeout > 0 &&
mmio_read_32(MBOX_OFFSET +
MBOX_DOORBELL_FROM_SDM) != 1) {
timeout--;
}
if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) != 1) {
INFO("Timed out waiting for SDM");
return MBOX_TIMEOUT;
}
/* Acknowledge the doorbell. */
mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0);
if (urgent & 1) {
/* Urgent path: compare the UA status bit against the
 * expectation encoded in `urgent`, then clear URG. */
if ((mmio_read_32(MBOX_OFFSET + MBOX_STATUS) &
MBOX_STATUS_UA_MASK) ^
(urgent & MBOX_STATUS_UA_MASK)) {
mmio_write_32(MBOX_OFFSET + MBOX_URG, 0);
return 0;
}
mmio_write_32(MBOX_OFFSET + MBOX_URG, 0);
INFO("Error: Mailbox did not get UA");
return -1;
}
rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
while (rout != rin) {
/* Consume a header word; publish ROUT immediately. */
resp = mmio_read_32(MBOX_OFFSET +
MBOX_RESP_BUFFER + ((rout++)*4));
rout %= MBOX_RESP_BUFFER_SIZE;
mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
/* Responses for other clients/jobs are skipped, not
 * treated as errors. */
if (MBOX_RESP_CLIENT_ID(resp) != MBOX_ATF_CLIENT_ID ||
MBOX_RESP_JOB_ID(resp) != job_id)
continue;
if (MBOX_RESP_ERR(resp) > 0) {
INFO("Error in response: %x\n", resp);
return -MBOX_RESP_ERR(resp);
}
/* Copy the payload words that follow the header. */
response_length = MBOX_RESP_LEN(resp);
while (response_length) {
response_length--;
resp = mmio_read_32(MBOX_OFFSET +
MBOX_RESP_BUFFER +
(rout)*4);
if (response) {
*(response + total_resp_len) = resp;
total_resp_len++;
}
rout++;
rout %= MBOX_RESP_BUFFER_SIZE;
mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
}
return total_resp_len;
}
}
}
/*
 * Queue an indirect mailbox command for the SDM without waiting for a
 * response.  When `urgent` is non-zero the urgent register is set
 * first.  The result of the ring-buffer fill is intentionally not
 * propagated (fire-and-forget).
 */
void mailbox_send_cmd_async(int job_id, unsigned int cmd, uint32_t *args,
int len, int urgent)
{
	uint32_t header;

	if (urgent != 0)
		mmio_write_32(MBOX_OFFSET + MBOX_URG, 1);

	header = MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
		MBOX_JOB_ID_CMD(job_id) |
		MBOX_CMD_LEN_CMD(len) |
		MBOX_INDIRECT |
		cmd;
	fill_mailbox_circular_buffer(header, args, len);
}
/*
 * Send a mailbox command to the SDM and wait for its response via
 * mailbox_poll_response().
 *
 * Non-urgent commands are queued in the command ring buffer; urgent
 * commands bypass the ring and are written to the urgent register.
 *
 * Returns the poll result (payload word count or negative error), or
 * the ring-buffer fill error when queuing fails.
 */
int mailbox_send_cmd(int job_id, unsigned int cmd, uint32_t *args,
int len, int urgent, uint32_t *response)
{
int status;
if (urgent) {
/* Fold the current urgent-ack status bit into `urgent` so the
 * poll routine can detect the ack transition. */
urgent |= mmio_read_32(MBOX_OFFSET + MBOX_STATUS) &
MBOX_STATUS_UA_MASK;
/* NOTE(review): the raw command code is written to the urgent
 * register here — presumably the SDM decodes the command from
 * MBOX_URG in urgent mode; confirm against the mailbox spec. */
mmio_write_32(MBOX_OFFSET + MBOX_URG, cmd);
status = 0;
} else {
status = fill_mailbox_circular_buffer(
MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
MBOX_JOB_ID_CMD(job_id) |
cmd, args, len);
}
if (status)
return status;
return mailbox_poll_response(job_id, urgent, response);
}
/*
 * Program the mailbox interrupt-enable register from the COE/UAE bits
 * of `interrupt`.
 */
void mailbox_set_int(int interrupt)
{
	uint32_t enable;

	enable = MBOX_COE_BIT(interrupt) | MBOX_UAE_BIT(interrupt);
	mmio_write_32(MBOX_OFFSET + MBOX_INT, enable);
}
/*
 * Enable mailbox COE/RIE interrupts, then ask the SDM to open QSPI
 * access for the HPS.  Response is discarded.
 */
void mailbox_set_qspi_open(void)
{
	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
	/* NULL (not 0) for the args/response pointer parameters. */
	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_OPEN, NULL, 0, 0, NULL);
}
/*
 * Ask the SDM to switch QSPI into direct-access mode.  Response is
 * discarded.
 */
void mailbox_set_qspi_direct(void)
{
	/* NULL (not 0) for the args/response pointer parameters. */
	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_DIRECT, NULL, 0, 0, NULL);
}
/*
 * Enable mailbox COE/RIE interrupts, then ask the SDM to close QSPI
 * access.  Response is discarded.
 */
void mailbox_set_qspi_close(void)
{
	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
	/* NULL (not 0) for the args/response pointer parameters. */
	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_CLOSE, NULL, 0, 0, NULL);
}
/*
 * Enable mailbox COE/RIE interrupts and issue MBOX_CMD_QSPI_DIRECT,
 * returning the mailbox_send_cmd() result (payload word count or a
 * negative error).
 */
int mailbox_get_qspi_clock(void)
{
	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
	/* NULL (not 0) for the args/response pointer parameters. */
	return mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_DIRECT, NULL,
				0, 0, NULL);
}
/*
 * Select a QSPI chip-select line via the SDM.  The device number is
 * placed in bits 31:28 of the single argument word.
 */
void mailbox_qspi_set_cs(int device_select)
{
	uint32_t cs_setting = device_select;

	/* QSPI device select settings at 31:28 */
	cs_setting = (cs_setting << 28);
	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
	/* NULL (not 0) for the response pointer parameter. */
	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_SET_CS, &cs_setting,
			1, 0, NULL);
}
/*
 * Request a cold reboot of the HPS through the SDM.  Response is
 * discarded.
 */
void mailbox_reset_cold(void)
{
	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
	/* NULL (not 0) for the args/response pointer parameters. */
	mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, NULL, 0, 0, NULL);
}
/*
 * Initialise the SDM mailbox: enable COE/RIE/UAE interrupts, clear the
 * urgent register and any stale doorbell, then issue an urgent RESTART
 * command.  On success the UAE interrupt is disabled again.
 *
 * Returns 0 on success, or the mailbox_send_cmd() error.
 */
int mailbox_init(void)
{
	int status;

	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE |
			MBOX_INT_FLAG_UAE);
	mmio_write_32(MBOX_OFFSET + MBOX_URG, 0);
	mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0);

	/* NULL (not 0) for the args/response pointer parameters. */
	status = mailbox_send_cmd(0, MBOX_CMD_RESTART, NULL, 0, 1, NULL);
	if (status)
		return status;

	mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
	return 0;
}
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <lib/mmio.h>
#include <lib/utils.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <platform_def.h>
#include "agilex_memory_controller.h"
#define ALT_CCU_NOC_DI_SET_MSK 0x10
/* Extra scheduler read latency margin, in controller clocks. */
#define DDR_READ_LATENCY_DELAY 40
#define MAX_MEM_CAL_RETRY 3
/* Delays are in microseconds (udelay units). */
#define PRE_CALIBRATION_DELAY 1
#define POST_CALIBRATION_DELAY 1
#define TIMEOUT_EMIF_CALIBRATION 100
#define CLEAR_EMIF_DELAY 50000
#define CLEAR_EMIF_TIMEOUT 0x100000
#define TIMEOUT_INT_RESP 10000
/* Pack (address order, bank width, column width, row width) into one
 * word for table lookup in match_ddr_conf(). */
#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R))
#define DDR_CONFIG_ELEMENTS (sizeof(ddr_config)/sizeof(uint32_t))
/* tWR = Min. 15ns constant, see JEDEC standard eg. DDR4 is JESD79-4.pdf */
#define tWR_IN_NS 15
void configure_hmc_adaptor_regs(void);
void configure_ddr_sched_ctrl_regs(void);
/* The following are the supported configurations */
/*
 * Supported DRAM geometry encodings, packed with DDR_CONFIG().
 * match_ddr_conf() returns an index into this table.
 */
uint32_t ddr_config[] = {
/* DDR_CONFIG(Address order,Bank,Column,Row) */
/* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */
DDR_CONFIG(0, 3, 10, 12),
DDR_CONFIG(0, 3, 9, 13),
DDR_CONFIG(0, 3, 10, 13),
DDR_CONFIG(0, 3, 9, 14),
DDR_CONFIG(0, 3, 10, 14),
DDR_CONFIG(0, 3, 10, 15),
DDR_CONFIG(0, 3, 11, 14),
DDR_CONFIG(0, 3, 11, 15),
DDR_CONFIG(0, 3, 10, 16),
DDR_CONFIG(0, 3, 11, 16),
DDR_CONFIG(0, 3, 12, 15), /* 0xa */
/* List for DDR4 only (pinout order > chip, bank, row, column) */
DDR_CONFIG(1, 3, 10, 14),
DDR_CONFIG(1, 4, 10, 14),
DDR_CONFIG(1, 3, 10, 15),
DDR_CONFIG(1, 4, 10, 15),
DDR_CONFIG(1, 3, 10, 16),
DDR_CONFIG(1, 4, 10, 16),
DDR_CONFIG(1, 3, 10, 17),
DDR_CONFIG(1, 4, 10, 17),
};
/*
 * Find ddr_conf in the ddr_config[] table.
 *
 * Returns the index of the matching entry, or 0 when no entry matches.
 *
 * NOTE(review): index 0 is itself a valid entry
 * (DDR_CONFIG(0, 3, 10, 12)), so a return of 0 is ambiguous between
 * "matched the first entry" and "not found"; confirm callers that
 * treat 0 as failure never need the first configuration.
 */
static int match_ddr_conf(uint32_t ddr_conf)
{
	/* unsigned index: DDR_CONFIG_ELEMENTS is sizeof-derived
	 * (unsigned), so the original `int i` comparison mixed
	 * signedness. */
	unsigned int i;

	for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) {
		if (ddr_conf == ddr_config[i])
			return i;
	}
	return 0;
}
/*
 * Verify the hard memory controller clock is running, polling the
 * system manager status bit for up to ~1000 microseconds.
 *
 * Returns 0 when the clock is up, -ETIMEDOUT otherwise.
 */
static int check_hmc_clk(void)
{
	unsigned long attempt;
	uint32_t clk_status;

	for (attempt = 0; attempt < 1000; attempt++) {
		clk_status = mmio_read_32(AGX_SYSMGR_CORE_HMC_CLK);
		if (clk_status & AGX_SYSMGR_CORE_HMC_CLK_STATUS)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
/*
 * Request an EMIF clear via the reset handshake register and wait for
 * the sequencer-to-core handshake bit to drop.
 *
 * Returns 0 on success, -ETIMEDOUT when the handshake never clears.
 */
static int clear_emif(void)
{
	unsigned long attempt;
	uint32_t hs_stat;

	mmio_write_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0);

	for (attempt = 0; attempt < CLEAR_EMIF_TIMEOUT; attempt++) {
		hs_stat = mmio_read_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT);
		if ((hs_stat & AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE)
				== 0)
			return 0;
		udelay(CLEAR_EMIF_DELAY);
	}
	return -ETIMEDOUT;
}
/*
 * Wait for DRAM calibration to complete, retrying up to
 * MAX_MEM_CAL_RETRY times with an EMIF clear between attempts.
 *
 * Returns 0 on successful calibration, -EIO when all retries fail.
 * (`data` is always assigned: the inner do-while executes at least
 * once before it is tested.)
 */
static int mem_calibration(void)
{
int status = 0;
uint32_t data;
unsigned long timeout;
unsigned long retry = 0;
udelay(PRE_CALIBRATION_DELAY);
do {
if (retry != 0)
INFO("DDR: Retrying DRAM calibration\n");
/* Poll the calibration-done bit for this attempt. */
timeout = 0;
do {
data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRCALSTAT);
if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1)
break;
udelay(1);
} while (++timeout < TIMEOUT_EMIF_CALIBRATION);
/* Not calibrated: clear the EMIF and retry; otherwise done. */
if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
status = clear_emif();
if (status)
ERROR("Failed to clear Emif\n");
} else {
break;
}
} while (++retry < MAX_MEM_CAL_RETRY);
/* Final verdict is taken from the last status read. */
if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
ERROR("DDR: DRAM calibration failed.\n");
status = -EIO;
} else {
INFO("DDR: DRAM calibration success.\n");
status = 0;
}
udelay(POST_CALIBRATION_DELAY);
return status;
}
/*
 * Bring up the hard memory controller: confirm the HMC clock is
 * running, run DRAM calibration, then program the HMC adaptor
 * registers.
 *
 * Returns 0 on success or the failing step's negative error code.
 */
int init_hard_memory_controller(void)
{
	int ret;

	ret = check_hmc_clk();
	if (ret != 0) {
		ERROR("DDR: Error, HMC clock not running\n");
		return ret;
	}

	/* mmio_clrbits_32(AGX_RSTMGR_BRGMODRST, AGX_RSTMGR_BRGMODRST_DDRSCH);*/

	ret = mem_calibration();
	if (ret != 0) {
		ERROR("DDR: Memory Calibration Failed\n");
		return ret;
	}

	configure_hmc_adaptor_regs();
	/* configure_ddr_sched_ctrl_regs();*/

	return 0;
}
/*
 * Program the HPS NOC DDR scheduler from the IOHMC calibration and
 * configuration registers: geometry lookup, DDRTIMING, DDRMODE, read
 * latency, activate (FAW/RRD) and device-to-device timings.
 *
 * All timing fields are read back from IOHMC CALTIMING*/DRAMTIMING0
 * registers, converted where needed from DDR clocks to scheduler
 * clocks, and packed into the scheduler registers.
 */
void configure_ddr_sched_ctrl_regs(void)
{
uint32_t data, dram_addr_order, ddr_conf, bank, row, col,
rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk,
burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio,
t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles,
bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw,
faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd;
INFO("Init HPS NOC's DDR Scheduler.\n");
/* Read DRAM geometry and look it up in the supported table. */
data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG1);
dram_addr_order = AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data);
data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data);
row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data);
bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) +
IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data);
ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row));
if (ddr_conf) {
mmio_clrsetbits_32(
AGX_MPFE_DDR_MAIN_SCHED_DDRCONF,
AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK,
AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf));
} else {
ERROR("DDR: Cannot find predefined ddrConf configuration.\n");
}
/* Mirror the DRAMADDRW value into the HMC adaptor. */
mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
/* Collect timing parameters from the calibration registers. */
data = mmio_read_32(AGX_MPFE_IOHMC_DRAMTIMING0);
rd_latency = AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data);
data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING0);
act_to_act = ACT_TO_ACT(data);
t_rcd = ACT_TO_RDWR(data);
act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data);
data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING1);
rd_to_wr = RD_TO_WR(data);
bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data);
bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data);
data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING2);
t_rtp = RD_TO_PCH(data);
data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING3);
wr_to_rd = CALTIMING3_WR_TO_RD(data);
bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data);
data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING4);
t_rp = PCH_TO_VALID(data);
data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1);
data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data);
/* Burst length in DDR clocks and in scheduler clocks (half rate). */
burst_len_ddr_clk = burst_len / 2;
burst_len_sched_clk = ((burst_len/2) / 2);
data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
/* tWR in clock cycles depends on the memory type's clock rate. */
switch (AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) {
case 1:
/* DDR4 - 1333MHz */
/* 20 (19.995) clock cycles = 15ns */
/* Calculate with rounding */
tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ?
((tWR_IN_NS * 1333) / 1000) + 1 :
((tWR_IN_NS * 1333) / 1000);
break;
default:
/* Others - 1066MHz or slower */
/* 16 (15.990) clock cycles = 15ns */
/* Calculate with rounding */
tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ?
((tWR_IN_NS * 1066) / 1000) + 1 :
((tWR_IN_NS * 1066) / 1000);
break;
}
/* Page-miss penalties seen by the scheduler. */
rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk;
wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles)
/ 2) - rd_to_wr + t_rp + t_rcd;
mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING,
bw_ratio << DDRTIMING_BWRATIO_OFST |
wr_to_rd << DDRTIMING_WRTORD_OFST|
rd_to_wr << DDRTIMING_RDTOWR_OFST |
burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST |
wr_to_miss << DDRTIMING_WRTOMISS_OFST |
rd_to_miss << DDRTIMING_RDTOMISS_OFST |
act_to_act << DDRTIMING_ACTTOACT_OFST);
data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0);
mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRMODE,
bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST |
auto_precharge << DDRMODE_AUTOPRECHARGE_OFST);
/* Read latency in scheduler clocks plus a fixed safety margin. */
mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_READLATENCY,
(rd_latency / 2) + DDR_READ_LATENCY_DELAY);
data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING9);
faw = AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data);
faw_bank = 1; // always 1 because we always have 4 bank DDR.
mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE,
faw_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST |
faw << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST |
act_to_act_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST);
mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV,
((bus_rd_to_rd
<< AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST)
& AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) |
((bus_rd_to_wr
<< AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST)
& AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) |
((bus_wr_to_rd
<< AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST)
& AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK));
}
/*
 * Compute the physical DRAM size in bytes from the external interface
 * width (HMC adaptor DDRIOCTRL) and the total DRAM address width
 * (IOHMC DRAMADDRW: column + row + bank + bank group + chip select).
 *
 * Returns 0 when the interface width field holds an unknown encoding.
 */
unsigned long get_physical_dram_size(void)
{
	uint32_t data;
	unsigned long ram_addr_width, ram_ext_if_io_width;

	data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRIOCTRL);
	switch (AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) {
	case 0:
		ram_ext_if_io_width = 16;
		break;
	case 1:
		ram_ext_if_io_width = 32;
		break;
	case 2:
		ram_ext_if_io_width = 64;
		break;
	default:
		ram_ext_if_io_width = 0;
		break;
	}

	data = mmio_read_32(AGX_MPFE_IOHMC_REG_DRAMADDRW);
	ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) +
		IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) +
		IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) +
		IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) +
		IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data);

	/*
	 * BUG FIX: `1 << ram_addr_width` shifted an int; the summed
	 * address width can reach 31+ bits on large configurations,
	 * which is undefined behaviour and truncates the size.  Shift
	 * an unsigned long instead.
	 */
	return (1UL << ram_addr_width) * (ram_ext_if_io_width / 8);
}
/*
 * Program the HMC adaptor: set the DDR I/O width, mirror the DRAM
 * address widths from the IOHMC, open a non-secure DDR window, and —
 * when the IOHMC reports ECC enabled — initialise the ECC control
 * registers and scrub all of DRAM to establish valid check bits.
 */
void configure_hmc_adaptor_regs(void)
{
uint32_t data;
uint32_t dram_io_width;
/* Configure DDR data rate */
dram_io_width = AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(
mmio_read_32(AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST));
/* The width encoding lives in bits 7:5 of the NIOS reserve field. */
dram_io_width = (dram_io_width & 0xFF) >> 5;
mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_DDRIOCTRL,
AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK,
dram_io_width << AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST);
/* Copy dram addr width from IOHMC to HMC ADP */
data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
/* Enable nonsecure access to DDR */
/* NOTE(review): the MPU/non-MPU region limit is the fixed value
 * 0x4000000 - 1, independent of the actual DRAM size — confirm. */
mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT,
0x4000000 - 1);
mmio_write_32(AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT,
0x4000000 - 1);
mmio_write_32(AGX_SOC_NOC_FW_DDR_SCR_ENABLE, BIT(0) | BIT(8));
/* ECC enablement */
data = mmio_read_32(AGX_MPFE_IOHMC_REG_CTRLCFG1);
if (data & (1 << AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) {
/* Reset the ECC counters first, with ECC disabled ... */
mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK);
/* ... enable read-modify-write and auto write-back ... */
mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL2,
AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK,
AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK);
/* ... then switch ECC on. */
mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK);
INFO("Scrubbing ECC\n");
/* ECC Scrubbing */
zeromem(DRAM_BASE, DRAM_SIZE);
} else {
INFO("ECC is disabled.\n");
}
}
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <lib/mmio.h>
#include "agilex_pinmux.h"
/*
 * Default pin-mux function select table: (register offset, select
 * value) pairs for the PIN*SEL registers.  Offsets jump from 0x9c to
 * 0x100, mirroring the register map's gap.
 */
const uint32_t sysmgr_pinmux_array_sel[] = {
0x00000000, 0x00000001, /* usb */
0x00000004, 0x00000001,
0x00000008, 0x00000001,
0x0000000c, 0x00000001,
0x00000010, 0x00000001,
0x00000014, 0x00000001,
0x00000018, 0x00000001,
0x0000001c, 0x00000001,
0x00000020, 0x00000001,
0x00000024, 0x00000001,
0x00000028, 0x00000001,
0x0000002c, 0x00000001,
0x00000030, 0x00000000, /* emac0 */
0x00000034, 0x00000000,
0x00000038, 0x00000000,
0x0000003c, 0x00000000,
0x00000040, 0x00000000,
0x00000044, 0x00000000,
0x00000048, 0x00000000,
0x0000004c, 0x00000000,
0x00000050, 0x00000000,
0x00000054, 0x00000000,
0x00000058, 0x00000000,
0x0000005c, 0x00000000,
0x00000060, 0x00000008, /* gpio1 */
0x00000064, 0x00000008,
0x00000068, 0x00000005, /* uart0 tx */
0x0000006c, 0x00000005, /* uart 0 rx */
0x00000070, 0x00000008, /* gpio */
0x00000074, 0x00000008,
0x00000078, 0x00000004, /* i2c1 */
0x0000007c, 0x00000004,
0x00000080, 0x00000007, /* jtag */
0x00000084, 0x00000007,
0x00000088, 0x00000007,
0x0000008c, 0x00000007,
0x00000090, 0x00000001, /* sdmmc data0 */
0x00000094, 0x00000001,
0x00000098, 0x00000001,
0x0000009c, 0x00000001,
0x00000100, 0x00000001,
0x00000104, 0x00000001, /* sdmmc.data3 */
0x00000108, 0x00000008, /* loan */
0x0000010c, 0x00000008, /* gpio */
0x00000110, 0x00000008,
0x00000114, 0x00000008, /* gpio1.io21 */
0x00000118, 0x00000005, /* mdio0.mdio */
0x0000011c, 0x00000005 /* mdio0.mdc */
};
/*
 * Default pin I/O control table: (register offset, control value)
 * pairs for the IO*CTRL registers, grouped by quadrant (Q1_1..Q4_1).
 */
const uint32_t sysmgr_pinmux_array_ctrl[] = {
0x00000000, 0x00502c38, /* Q1_1 */
0x00000004, 0x00102c38,
0x00000008, 0x00502c38,
0x0000000c, 0x00502c38,
0x00000010, 0x00502c38,
0x00000014, 0x00502c38,
0x00000018, 0x00502c38,
0x0000001c, 0x00502c38,
0x00000020, 0x00502c38,
0x00000024, 0x00502c38,
0x00000028, 0x00502c38,
0x0000002c, 0x00502c38,
0x00000030, 0x00102c38, /* Q2_1 */
0x00000034, 0x00102c38,
0x00000038, 0x00502c38,
0x0000003c, 0x00502c38,
0x00000040, 0x00102c38,
0x00000044, 0x00102c38,
0x00000048, 0x00502c38,
0x0000004c, 0x00502c38,
0x00000050, 0x00102c38,
0x00000054, 0x00102c38,
0x00000058, 0x00502c38,
0x0000005c, 0x00502c38,
0x00000060, 0x00502c38, /* Q3_1 */
0x00000064, 0x00502c38,
0x00000068, 0x00102c38,
0x0000006c, 0x00502c38,
0x000000d0, 0x00502c38,
0x000000d4, 0x00502c38,
0x000000d8, 0x00542c38,
0x000000dc, 0x00542c38,
0x000000e0, 0x00502c38,
0x000000e4, 0x00502c38,
0x000000e8, 0x00102c38,
0x000000ec, 0x00502c38,
0x000000f0, 0x00502c38, /* Q4_1 */
0x000000f4, 0x00502c38,
0x000000f8, 0x00102c38,
0x000000fc, 0x00502c38,
0x00000100, 0x00502c38,
0x00000104, 0x00502c38,
0x00000108, 0x00102c38,
0x0000010c, 0x00502c38,
0x00000110, 0x00502c38,
0x00000114, 0x00502c38,
0x00000118, 0x00542c38,
0x0000011c, 0x00102c38
};
/*
 * Default HPS-vs-FPGA routing table: (register offset, value) pairs
 * for the *_USEFPGA registers.  Offsets 0x24 and 0x4c are absent from
 * the register map.
 */
const uint32_t sysmgr_pinmux_array_fpga[] = {
0x00000000, 0x00000000,
0x00000004, 0x00000000,
0x00000008, 0x00000000,
0x0000000c, 0x00000000,
0x00000010, 0x00000000,
0x00000014, 0x00000000,
0x00000018, 0x00000000,
0x0000001c, 0x00000000,
0x00000020, 0x00000000,
0x00000028, 0x00000000,
0x0000002c, 0x00000000,
0x00000030, 0x00000000,
0x00000034, 0x00000000,
0x00000038, 0x00000000,
0x0000003c, 0x00000000,
0x00000040, 0x00000000,
0x00000044, 0x00000000,
0x00000048, 0x00000000,
0x00000050, 0x00000000,
0x00000054, 0x00000000,
0x00000058, 0x0000002a
};
/*
 * Default I/O delay table: (register offset, delay value) pairs for
 * the IO*DELAY registers; all delays default to zero.  Offsets jump
 * from 0x9c to 0x100, mirroring the register map's gap.
 */
const uint32_t sysmgr_pinmux_array_iodelay[] = {
0x00000000, 0x00000000,
0x00000004, 0x00000000,
0x00000008, 0x00000000,
0x0000000c, 0x00000000,
0x00000010, 0x00000000,
0x00000014, 0x00000000,
0x00000018, 0x00000000,
0x0000001c, 0x00000000,
0x00000020, 0x00000000,
0x00000024, 0x00000000,
0x00000028, 0x00000000,
0x0000002c, 0x00000000,
0x00000030, 0x00000000,
0x00000034, 0x00000000,
0x00000038, 0x00000000,
0x0000003c, 0x00000000,
0x00000040, 0x00000000,
0x00000044, 0x00000000,
0x00000048, 0x00000000,
0x0000004c, 0x00000000,
0x00000050, 0x00000000,
0x00000054, 0x00000000,
0x00000058, 0x00000000,
0x0000005c, 0x00000000,
0x00000060, 0x00000000,
0x00000064, 0x00000000,
0x00000068, 0x00000000,
0x0000006c, 0x00000000,
0x00000070, 0x00000000,
0x00000074, 0x00000000,
0x00000078, 0x00000000,
0x0000007c, 0x00000000,
0x00000080, 0x00000000,
0x00000084, 0x00000000,
0x00000088, 0x00000000,
0x0000008c, 0x00000000,
0x00000090, 0x00000000,
0x00000094, 0x00000000,
0x00000098, 0x00000000,
0x0000009c, 0x00000000,
0x00000100, 0x00000000,
0x00000104, 0x00000000,
0x00000108, 0x00000000,
0x0000010c, 0x00000000,
0x00000110, 0x00000000,
0x00000114, 0x00000000,
0x00000118, 0x00000000,
0x0000011c, 0x00000000
};
/*
 * Program the pin-mux select, I/O control, FPGA routing, and I/O delay
 * registers from the (offset, value) pair tables carried in the
 * handoff data.
 */
void config_pinmux(handoff *hoff_ptr)
{
	/*
	 * Each handoff table is a flat list of (register offset, value)
	 * pairs; the constants below are total word counts (pairs * 2),
	 * replacing the former bare magic numbers 96/42.
	 */
	enum {
		PINMUX_SEL_WORDS	= 96,
		PINMUX_IOCTRL_WORDS	= 96,
		PINMUX_FPGA_WORDS	= 42,
		PINMUX_IODELAY_WORDS	= 96,
	};
	unsigned int i;

	for (i = 0; i < PINMUX_SEL_WORDS; i += 2) {
		mmio_write_32(AGX_PINMUX_PIN0SEL +
			hoff_ptr->pinmux_sel_array[i],
			hoff_ptr->pinmux_sel_array[i+1]);
	}
	for (i = 0; i < PINMUX_IOCTRL_WORDS; i += 2) {
		mmio_write_32(AGX_PINMUX_IO0CTRL +
			hoff_ptr->pinmux_io_array[i],
			hoff_ptr->pinmux_io_array[i+1]);
	}
	for (i = 0; i < PINMUX_FPGA_WORDS; i += 2) {
		mmio_write_32(AGX_PINMUX_PINMUX_EMAC0_USEFPGA +
			hoff_ptr->pinmux_fpga_array[i],
			hoff_ptr->pinmux_fpga_array[i+1]);
	}
	for (i = 0; i < PINMUX_IODELAY_WORDS; i += 2) {
		mmio_write_32(AGX_PINMUX_IO0_DELAY +
			hoff_ptr->pinmux_iodelay_array[i],
			hoff_ptr->pinmux_iodelay_array[i+1]);
	}
}
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <lib/mmio.h>
#include "agilex_reset_manager.h"
/*
 * De-assert (clear) the module-reset bits for the HPS peripherals so
 * later boot stages can use them: watchdogs, system/SP timers, I2C,
 * UARTs and GPIOs (PER1MODRST), then the OCP interconnect masters
 * followed by the peripheral cores themselves (PER0MODRST), and
 * finally the MPFE bridge (BRGMODRST).
 */
void deassert_peripheral_reset(void)
{
	mmio_clrbits_32(AGX_RSTMGR_PER1MODRST,
			AGX_RSTMGR_PER1MODRST_WATCHDOG0 |
			AGX_RSTMGR_PER1MODRST_WATCHDOG1 |
			AGX_RSTMGR_PER1MODRST_WATCHDOG2 |
			AGX_RSTMGR_PER1MODRST_WATCHDOG3 |
			AGX_RSTMGR_PER1MODRST_L4SYSTIMER0 |
			AGX_RSTMGR_PER1MODRST_L4SYSTIMER1 |
			AGX_RSTMGR_PER1MODRST_SPTIMER0 |
			AGX_RSTMGR_PER1MODRST_SPTIMER1 |
			AGX_RSTMGR_PER1MODRST_I2C0 |
			AGX_RSTMGR_PER1MODRST_I2C1 |
			AGX_RSTMGR_PER1MODRST_I2C2 |
			AGX_RSTMGR_PER1MODRST_I2C3 |
			AGX_RSTMGR_PER1MODRST_I2C4 |
			AGX_RSTMGR_PER1MODRST_UART0 |
			AGX_RSTMGR_PER1MODRST_UART1 |
			AGX_RSTMGR_PER1MODRST_GPIO0 |
			AGX_RSTMGR_PER1MODRST_GPIO1);
	/* OCP (interconnect-facing) resets are released first ... */
	mmio_clrbits_32(AGX_RSTMGR_PER0MODRST,
			AGX_RSTMGR_PER0MODRST_EMAC0OCP |
			AGX_RSTMGR_PER0MODRST_EMAC1OCP |
			AGX_RSTMGR_PER0MODRST_EMAC2OCP |
			AGX_RSTMGR_PER0MODRST_USB0OCP |
			AGX_RSTMGR_PER0MODRST_USB1OCP |
			AGX_RSTMGR_PER0MODRST_NANDOCP |
			AGX_RSTMGR_PER0MODRST_SDMMCOCP |
			AGX_RSTMGR_PER0MODRST_DMAOCP);
	/* ... then the peripheral cores themselves. */
	mmio_clrbits_32(AGX_RSTMGR_PER0MODRST,
			AGX_RSTMGR_PER0MODRST_EMAC0 |
			AGX_RSTMGR_PER0MODRST_EMAC1 |
			AGX_RSTMGR_PER0MODRST_EMAC2 |
			AGX_RSTMGR_PER0MODRST_USB0 |
			AGX_RSTMGR_PER0MODRST_USB1 |
			AGX_RSTMGR_PER0MODRST_NAND |
			AGX_RSTMGR_PER0MODRST_SDMMC |
			AGX_RSTMGR_PER0MODRST_DMA |
			AGX_RSTMGR_PER0MODRST_SPIM0 |
			AGX_RSTMGR_PER0MODRST_SPIM1 |
			AGX_RSTMGR_PER0MODRST_SPIS0 |
			AGX_RSTMGR_PER0MODRST_SPIS1 |
			AGX_RSTMGR_PER0MODRST_EMACPTP |
			AGX_RSTMGR_PER0MODRST_DMAIF0 |
			AGX_RSTMGR_PER0MODRST_DMAIF1 |
			AGX_RSTMGR_PER0MODRST_DMAIF2 |
			AGX_RSTMGR_PER0MODRST_DMAIF3 |
			AGX_RSTMGR_PER0MODRST_DMAIF4 |
			AGX_RSTMGR_PER0MODRST_DMAIF5 |
			AGX_RSTMGR_PER0MODRST_DMAIF6 |
			AGX_RSTMGR_PER0MODRST_DMAIF7);
	mmio_clrbits_32(AGX_RSTMGR_BRGMODRST,
			AGX_RSTMGR_BRGMODRST_MPFE);
}
/*
 * Enable every hardware handshake that must be honoured before a warm
 * reset (SDRAM self-refresh, FPGA, ETR stall, L2 flush and the two
 * L3-NOC debug handshakes) by OR-ing their enable bits into HDSKEN.
 */
void config_hps_hs_before_warm_reset(void)
{
	const uint32_t hdsken_bits = AGX_RSTMGR_HDSKEN_SDRSELFREFEN |
				     AGX_RSTMGR_HDSKEN_FPGAHSEN |
				     AGX_RSTMGR_HDSKEN_ETRSTALLEN |
				     AGX_RSTMGR_HDSKEN_L2FLUSHEN |
				     AGX_RSTMGR_HDSKEN_L3NOC_DBG |
				     AGX_RSTMGR_HDSKEN_DEBUG_L3NOC;

	mmio_setbits_32(AGX_RSTMGR_HDSKEN, hdsken_bits);
}
/*
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <lib/mmio.h>
#include <lib/utils_def.h>
#include "agilex_system_manager.h"
/*
 * Open the L4 NOC firewalls so the non-secure world can access the
 * listed peripherals and their ECC/system registers.  Each write
 * clears the per-slave security control register (DISABLE_L4_FIREWALL).
 */
void enable_nonsecure_access(void)
{
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_NAND_REGISTER, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_NAND_DATA, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_NAND_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_NAND_READ_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_NAND_WRITE_ECC,
		DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_USB0_REGISTER, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_USB1_REGISTER, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_USB0_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_USB1_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_MASTER0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_MASTER1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_SLAVE0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_SLAVE1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_EMAC0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_EMAC1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_EMAC2, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC0RX_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC0TX_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC1RX_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC1TX_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC2RX_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC2TX_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SDMMC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_SDMMC_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_GPIO0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_GPIO1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C2, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C3, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C4, DISABLE_L4_FIREWALL);
	/* NOTE(review): SP_TIMER0 is not opened here, only SP_TIMER1 —
	 * confirm whether that is intentional. */
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SP_TIMER1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_UART0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_PER_SCR_UART1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_DMA_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_OCRAM_ECC, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_CLK_MGR, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_IO_MGR, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_RST_MGR, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_SYS_MGR, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_OSC0_TIMER, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_OSC1_TIMER, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG0, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG1, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG2, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG3, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_DAP, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_L4_NOC_PROBES, DISABLE_L4_FIREWALL);
	mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_L4_NOC_QOS, DISABLE_L4_FIREWALL);
}
/*
 * Open the SOC2FPGA and LWSOC2FPGA bridge firewalls for non-secure
 * masters.
 */
void enable_ns_bridge_access(void)
{
	mmio_write_32(AGX_FIREWALL_SOC2FPGA, DISABLE_BRIDGE_FIREWALL);
	mmio_write_32(AGX_FIREWALL_LWSOC2FPGA, DISABLE_BRIDGE_FIREWALL);
}
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <arch_helpers.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#define AGX_GLOBAL_TIMER 0xffd01000
#define AGX_GLOBAL_TIMER_EN 0x3
/********************************************************************
* The timer delay function
********************************************************************/
/*
 * Return the current timer tick for the generic delay-timer framework.
 * The framework expects a down-counter, while the architectural counter
 * counts up: invert the tick value and truncate it to 32 bits to
 * emulate a down-counter.
 */
static uint32_t socfpga_get_timer_value(void)
{
	uint64_t ticks = read_cntpct_el0();

	return ~(uint32_t)ticks;
}
/*
 * Delay-timer backend: ticks come straight from the system counter,
 * so multiplier 1 with the counter frequency (in MHz) as divisor
 * yields microsecond resolution.
 */
static const timer_ops_t plat_timer_ops = {
	.get_timer_value	= socfpga_get_timer_value,
	.clk_mult		= 1,
	.clk_div		= PLAT_SYS_COUNTER_FREQ_IN_MHZ,
};
/*
 * Register the delay-timer backend and enable the global timer
 * (AGX_GLOBAL_TIMER_EN sets both enable bits).
 */
void socfpga_delay_timer_init(void)
{
	timer_init(&plat_timer_ops);
	mmio_write_32(AGX_GLOBAL_TIMER, AGX_GLOBAL_TIMER_EN);
}
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/desc_image_load.h>
/*******************************************************************************
* This function flushes the data structures so that they are visible
* in memory for the next BL image.
******************************************************************************/
/*******************************************************************************
 * This function flushes the data structures so that they are visible
 * in memory for the next BL image.
 ******************************************************************************/
void plat_flush_next_bl_params(void)
{
	flush_bl_params_desc();
}
/*******************************************************************************
* This function returns the list of loadable images.
******************************************************************************/
/*******************************************************************************
 * This function returns the list of loadable images, taken from the
 * generic memory-parameters descriptor table.
 ******************************************************************************/
bl_load_info_t *plat_get_bl_image_load_info(void)
{
	return get_bl_load_info_from_mem_params_desc();
}
/*******************************************************************************
* This function returns the list of executable images.
******************************************************************************/
/*******************************************************************************
 * This function returns the list of executable images, taken from the
 * generic memory-parameters descriptor table.
 ******************************************************************************/
bl_params_t *plat_get_next_bl_params(void)
{
	return get_next_bl_params_from_mem_params_desc();
}
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/gicv2.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>
#include "agilex_reset_manager.h"
#include "agilex_mailbox.h"
/* Reset manager base address and MPU module-reset register offset. */
#define AGX_RSTMGR_OFST 0xffd11000
#define AGX_RSTMGR_MPUMODRST_OFST 0x20
/* Shared memory slots used by the PSCI handlers below: the warm-boot
 * entry point and the id of the CPU being released from the holding
 * pen.  Addresses come from the platform definitions. */
uintptr_t *agilex_sec_entry = (uintptr_t *) PLAT_AGX_SEC_ENTRY;
uintptr_t *cpuid_release = (uintptr_t *) PLAT_CPUID_RELEASE;
/*******************************************************************************
* plat handler called when a CPU is about to enter standby.
******************************************************************************/
/*******************************************************************************
 * plat handler called when a CPU is about to enter standby.
 ******************************************************************************/
void socfpga_cpu_standby(plat_local_state_t cpu_state)
{
	/*
	 * Enter standby state
	 * dsb is good practice before using wfi to enter low power states
	 */
	VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);
	dsb();
	wfi();
}
/*******************************************************************************
* plat handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
/*******************************************************************************
 * plat handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 ******************************************************************************/
int socfpga_pwr_domain_on(u_register_t mpidr)
{
	/*
	 * plat_core_pos_by_mpidr() returns a signed int (-1 on error);
	 * keep it signed so the error check below is not a fragile
	 * signed/unsigned comparison.
	 */
	int cpu_id = plat_core_pos_by_mpidr(mpidr);

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id == -1)
		return PSCI_E_INTERN_FAIL;

	/* Tell the held core which CPU id may leave the holding pen. */
	*cpuid_release = cpu_id;

	/*
	 * NOTE(review): this path uses mmio_setbits_32() on MPUMODRST while
	 * the intent is to "release core reset"; suspend_finish uses
	 * mmio_clrbits_32() for release — confirm intended polarity against
	 * the reset-manager spec.
	 */
	mmio_setbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
			1 << cpu_id);

	return PSCI_E_SUCCESS;
}
/*******************************************************************************
* plat handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
/*******************************************************************************
 * plat handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void socfpga_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();

	/* %zu is the correct format specifier for the size_t index (was %lu). */
	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%zu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* TODO: Prevent interrupts from spuriously waking up this cpu */
	/* gicv2_cpuif_disable(); */

	/* Assert core reset to take this CPU offline. */
	mmio_setbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
			1 << cpu_id);
}
/*******************************************************************************
* plat handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
/*******************************************************************************
 * plat handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void socfpga_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();

	/* %zu is the correct format specifier for the size_t index (was %lu). */
	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%zu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Assert core reset while suspended. */
	mmio_setbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
			1 << cpu_id);
}
/*******************************************************************************
* plat handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
/*******************************************************************************
 * plat handler called when a power domain has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from.
 ******************************************************************************/
void socfpga_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* %zu is the correct format specifier for the size_t index (was %lu). */
	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%zu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Program the gic per-cpu distributor or re-distributor interface */
	gicv2_pcpu_distif_init();
	gicv2_set_pe_target_mask(plat_my_core_pos());

	/* Enable the gic cpu interface */
	gicv2_cpuif_enable();
}
/*******************************************************************************
* plat handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
/*******************************************************************************
 * plat handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void socfpga_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();

	/* %zu is the correct format specifier for the size_t index (was %lu). */
	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%zu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Release core reset now that the domain is back up. */
	mmio_clrbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
			1 << cpu_id);
}
/*******************************************************************************
* plat handlers to shutdown/reboot the system
******************************************************************************/
/*
 * System off is not supported: park the core in wfi and panic if it
 * ever wakes.
 * NOTE(review): the wfi() precedes the ERROR print, so the message is
 * only emitted after a spurious wakeup — confirm this ordering is
 * intentional.
 */
static void __dead2 socfpga_system_off(void)
{
	wfi();
	ERROR("System Off: operation not handled.\n");
	panic();
}
/*
 * Cold system reset: release the peripheral resets, ask the SDM to
 * perform a cold reset via the mailbox, then park until it happens.
 */
static void __dead2 socfpga_system_reset(void)
{
	/* Log message fixed: this path DE-asserts the peripheral resets. */
	INFO("Deassert Peripheral from Reset\r\n");

	deassert_peripheral_reset();
	mailbox_reset_cold();

	while (1)
		wfi();
}
/*
 * Accept every requested power state without filling req_state.
 * NOTE(review): req_state is left untouched — presumably acceptable
 * for the states this platform advertises; confirm against the PSCI
 * framework expectations.
 */
int socfpga_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);
	return PSCI_E_SUCCESS;
}
/* Accept any non-secure entry point (no range checking performed). */
int socfpga_validate_ns_entrypoint(unsigned long ns_entrypoint)
{
	VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint);
	return PSCI_E_SUCCESS;
}
/*
 * Report the deepest state (MAX_OFF) for both the CPU level and
 * power level 1 when the system suspends.
 */
void socfpga_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
}
/*******************************************************************************
* Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
* platform layer will take care of registering the handlers with PSCI.
******************************************************************************/
/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform layer will take care of registering the handlers with PSCI.
 ******************************************************************************/
const plat_psci_ops_t socfpga_psci_pm_ops = {
	.cpu_standby = socfpga_cpu_standby,
	.pwr_domain_on = socfpga_pwr_domain_on,
	.pwr_domain_off = socfpga_pwr_domain_off,
	.pwr_domain_suspend = socfpga_pwr_domain_suspend,
	.pwr_domain_on_finish = socfpga_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = socfpga_pwr_domain_suspend_finish,
	.system_off = socfpga_system_off,
	.system_reset = socfpga_system_reset,
	.validate_power_state = socfpga_validate_power_state,
	.validate_ns_entrypoint = socfpga_validate_ns_entrypoint,
	.get_sys_suspend_power_state = socfpga_get_sys_suspend_power_state
};
/*******************************************************************************
* Export the platform specific power ops.
******************************************************************************/
/*******************************************************************************
 * Export the platform specific power ops.  Stores the warm-boot entry
 * point in the shared slot read by secondary cores, then hands the ops
 * table to the PSCI framework.
 ******************************************************************************/
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	/* Save warm boot entrypoint.*/
	*agilex_sec_entry = sec_entrypoint;

	*psci_ops = &socfpga_psci_pm_ops;
	return 0;
}
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <tools_share/uuid.h>
#include "agilex_mailbox.h"
/* Number of SiP Calls implemented */
#define SIP_NUM_CALLS		0x3
/* Total buffer the driver can hold */
#define FPGA_CONFIG_BUFFER_SIZE 4
/* Ring state shared by the FPGA-configuration handlers below:
 * current_block/current_buffer index the in-flight transfer,
 * max_blocks is the SDM's remaining sub-block credit, and
 * bytes_per_block the SDM's preferred chunk size. */
int current_block;
int current_buffer;
int current_id = 1;
int max_blocks;
uint32_t bytes_per_block;
uint32_t blocks_submitted;
uint32_t blocks_completed;
struct fpga_config_info {
	uint32_t addr;		/* physical start of the bitstream chunk */
	int size;		/* total bytes in this chunk */
	int size_written;	/* bytes already queued to the SDM */
	uint32_t write_requested; /* nonzero while this slot is in use */
	int subblocks_sent;	/* mailbox sub-blocks still outstanding */
	int block_number;	/* global submission order of this chunk */
};
/* SiP Service UUID */
DEFINE_SVC_UUID2(intl_svc_uid,
		0xa85273b0, 0xe85a, 0x4862, 0xa6, 0x2a,
		0xfa, 0x88, 0x88, 0x17, 0x68, 0x81);
/* Fallback for SiP function ids not handled by sip_smc_handler():
 * log the unknown fid and return SMC_UNK to the caller. */
uint64_t socfpga_sip_handler(uint32_t smc_fid,
			uint64_t x1,
			uint64_t x2,
			uint64_t x3,
			uint64_t x4,
			void *cookie,
			void *handle,
			uint64_t flags)
{
	ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
/* Ring of pending bitstream chunks, indexed by current_buffer. */
struct fpga_config_info fpga_config_buffers[FPGA_CONFIG_BUFFER_SIZE];
/*
 * Stream one queued bitstream chunk to the SDM in bytes_per_block
 * sub-blocks, while the SDM still has credit (max_blocks > 0).
 * When the final (possibly short) sub-block of a chunk is queued, the
 * ring index current_buffer advances to the next slot.
 *
 * Refactor: the two original branches differed only in the sub-block
 * length and the ring-advance; the common mailbox send is now shared.
 */
static void intel_fpga_sdm_write_buffer(struct fpga_config_info *buffer)
{
	uint32_t args[3];
	uint32_t this_block;
	int remaining;

	while (max_blocks > 0 && buffer->size > buffer->size_written) {
		/* remaining > 0 by the loop guard, so the cast is safe. */
		remaining = buffer->size - buffer->size_written;
		if ((uint32_t)remaining <= bytes_per_block)
			this_block = (uint32_t)remaining;	/* final sub-block */
		else
			this_block = bytes_per_block;		/* full sub-block */

		args[0] = (1<<8);
		args[1] = buffer->addr + buffer->size_written;
		args[2] = this_block;
		buffer->size_written += this_block;
		buffer->subblocks_sent++;
		mailbox_send_cmd_async(0x4, MBOX_RECONFIG_DATA, args, 3, 0);

		if (this_block == (uint32_t)remaining) {
			/* Chunk fully queued; move to the next ring slot. */
			current_buffer++;
			current_buffer %= FPGA_CONFIG_BUFFER_SIZE;
		}
		max_blocks--;
	}
}
/*
 * Drain as many queued chunks as the SDM credit allows.
 * NOTE(review): every iteration re-reads the global current_buffer
 * (which intel_fpga_sdm_write_buffer advances), so the same call may
 * process several ring slots — presumably intentional; confirm.
 */
static int intel_fpga_sdm_write_all(void)
{
	int i;

	for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++)
		intel_fpga_sdm_write_buffer(
			&fpga_config_buffers[current_buffer]);
	return 0;
}
/*
 * Query the SDM reconfiguration status and map it to a SiP status:
 * OK only when the state is idle/CONFIG, nSTATUS is high, no SEU
 * error is flagged, and both CONF_DONE and INIT_DONE are set.
 * NOTE(review): args[] is uninitialized but the send length is 0, so
 * it is never read — harmless, but could be passed as NULL/0.
 */
uint32_t intel_mailbox_fpga_config_isdone(void)
{
	uint32_t args[2];
	uint32_t response[6];
	int status;

	status = mailbox_send_cmd(1, MBOX_RECONFIG_STATUS, args, 0, 0,
				response);
	if (status < 0)
		return INTEL_SIP_SMC_STATUS_ERROR;

	/* Any state other than idle (0) or CONFIG is an error. */
	if (response[RECONFIG_STATUS_STATE] &&
		response[RECONFIG_STATUS_STATE] != MBOX_CFGSTAT_STATE_CONFIG)
		return INTEL_SIP_SMC_STATUS_ERROR;

	if (!(response[RECONFIG_STATUS_PIN_STATUS] & PIN_STATUS_NSTATUS))
		return INTEL_SIP_SMC_STATUS_ERROR;

	if (response[RECONFIG_STATUS_SOFTFUNC_STATUS] &
		SOFTFUNC_STATUS_SEU_ERROR)
		return INTEL_SIP_SMC_STATUS_ERROR;

	if ((response[RECONFIG_STATUS_SOFTFUNC_STATUS] &
		SOFTFUNC_STATUS_CONF_DONE) &&
		(response[RECONFIG_STATUS_SOFTFUNC_STATUS] &
		SOFTFUNC_STATUS_INIT_DONE))
		return INTEL_SIP_SMC_STATUS_OK;

	return INTEL_SIP_SMC_STATUS_ERROR;
}
/*
 * Account one completed mailbox response against the in-flight chunk
 * (current_block).  When that chunk's last sub-block completes and all
 * its bytes have been queued, free the slot, advance current_block,
 * report the chunk's address via buffer_addr_completed and return 0.
 * Returns -1 when no chunk finished with this response.
 */
static int mark_last_buffer_xfer_completed(uint32_t *buffer_addr_completed)
{
	int i;

	for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
		if (fpga_config_buffers[i].block_number == current_block) {
			fpga_config_buffers[i].subblocks_sent--;
			if (fpga_config_buffers[i].subblocks_sent == 0
			&& fpga_config_buffers[i].size <=
			fpga_config_buffers[i].size_written) {
				fpga_config_buffers[i].write_requested = 0;
				current_block++;
				*buffer_addr_completed =
					fpga_config_buffers[i].addr;
				return 0;
			}
		}
	}
	return -1;
}
/*
 * Check that a pointer falls inside the DDR window.
 * Returns 0 when the address is in DDR and -1 otherwise; callers only
 * test the result against 0, so the unsigned return type is kept for
 * interface compatibility.
 *
 * Fix: accept an address exactly at DRAM_BASE — the original '>'
 * rejected the first byte of DRAM (off-by-one).
 */
unsigned int address_in_ddr(uint32_t *addr)
{
	if (((unsigned long long)addr >= DRAM_BASE) &&
		((unsigned long long)addr < DRAM_BASE + DRAM_SIZE))
		return 0;

	return -1;
}
/*
 * SMC backend for FPGA_CONFIG_COMPLETED_WRITE: harvest up to three
 * completed chunk addresses into completed_addr[], refill the SDM with
 * any queued data, and report OK (addresses returned), BUSY (nothing
 * completed yet) or OK when every slot has drained.
 */
int intel_fpga_config_completed_write(uint32_t *completed_addr,
					uint32_t *count)
{
	uint32_t status = INTEL_SIP_SMC_STATUS_OK;
	*count = 0;
	int resp_len = 0;
	uint32_t resp[5];
	int all_completed = 1;
	int count_check = 0;

	/* The output buffers themselves must live in DDR. */
	if (address_in_ddr(completed_addr) != 0 || address_in_ddr(count) != 0)
		return INTEL_SIP_SMC_STATUS_ERROR;

	/* Validate the three slots that may be written below. */
	for (count_check = 0; count_check < 3; count_check++)
		if (address_in_ddr(&completed_addr[*count + count_check]) != 0)
			return INTEL_SIP_SMC_STATUS_ERROR;

	/* Each mailbox response frees one SDM credit (max_blocks++). */
	resp_len = mailbox_read_response(0x4, resp);
	while (resp_len >= 0 && *count < 3) {
		max_blocks++;
		if (mark_last_buffer_xfer_completed(
			&completed_addr[*count]) == 0)
			*count = *count + 1;
		else
			break;
		resp_len = mailbox_read_response(0x4, resp);
	}

	/* No completion: only NO_RESPONSE/TIMEOUT/0 are non-fatal here. */
	if (*count <= 0) {
		if (resp_len != MBOX_NO_RESPONSE &&
			resp_len != MBOX_TIMEOUT && resp_len != 0) {
			return INTEL_SIP_SMC_STATUS_ERROR;
		}

		*count = 0;
	}

	/* Push any still-queued chunks with the refreshed credit. */
	intel_fpga_sdm_write_all();

	if (*count > 0)
		status = INTEL_SIP_SMC_STATUS_OK;
	else if (*count == 0)
		status = INTEL_SIP_SMC_STATUS_BUSY;

	/* All ring slots idle means the whole configuration is flushed. */
	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
		if (fpga_config_buffers[i].write_requested != 0) {
			all_completed = 0;
			break;
		}
	}

	if (all_completed == 1)
		return INTEL_SIP_SMC_STATUS_OK;

	return status;
}
/*
 * SMC backend for FPGA_CONFIG_START: ask the SDM to enter
 * reconfiguration mode, capture its credit (max_blocks) and preferred
 * sub-block size (bytes_per_block), and reset the chunk ring.
 * Returns the (negative) mailbox error on failure, 0 on success.
 * NOTE(review): config_type is currently unused and blocks_completed
 * is not reset here — confirm both are intentional.
 */
int intel_fpga_config_start(uint32_t config_type)
{
	uint32_t response[3];
	int status = 0;

	status = mailbox_send_cmd(2, MBOX_RECONFIG, 0, 0, 0,
			response);
	if (status < 0)
		return status;

	max_blocks = response[0];
	bytes_per_block = response[1];

	for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
		fpga_config_buffers[i].size = 0;
		fpga_config_buffers[i].size_written = 0;
		fpga_config_buffers[i].addr = 0;
		fpga_config_buffers[i].write_requested = 0;
		fpga_config_buffers[i].block_number = 0;
		fpga_config_buffers[i].subblocks_sent = 0;
	}

	blocks_submitted = 0;
	current_block = 0;
	current_buffer = 0;

	return 0;
}
/*
 * SMC backend for FPGA_CONFIG_WRITE: queue a bitstream chunk
 * [mem, mem+size) into a free ring slot and kick the SDM writer.
 * Returns OK, BUSY (last free slot consumed) or REJECTED (bad range
 * or ring full).
 *
 * Fix: an out-of-range buffer is now rejected up front.  The original
 * set REJECTED but still queued the buffer, and the status could later
 * be overwritten with BUSY — an invalid address must never reach the SDM.
 */
uint32_t intel_fpga_config_write(uint64_t mem, uint64_t size)
{
	int i = 0;
	uint32_t status = INTEL_SIP_SMC_STATUS_OK;

	if (mem < DRAM_BASE || mem > DRAM_BASE + DRAM_SIZE ||
	    mem + size > DRAM_BASE + DRAM_SIZE)
		return INTEL_SIP_SMC_STATUS_REJECTED;

	for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
		if (!fpga_config_buffers[i].write_requested) {
			fpga_config_buffers[i].addr = mem;
			fpga_config_buffers[i].size = size;
			fpga_config_buffers[i].size_written = 0;
			fpga_config_buffers[i].write_requested = 1;
			fpga_config_buffers[i].block_number =
				blocks_submitted++;
			fpga_config_buffers[i].subblocks_sent = 0;
			break;
		}
	}

	if (i == FPGA_CONFIG_BUFFER_SIZE) {
		/* No free slot. */
		status = INTEL_SIP_SMC_STATUS_REJECTED;
		return status;
	} else if (i == FPGA_CONFIG_BUFFER_SIZE - 1) {
		/* Took the last slot: accepted, but signal back-pressure. */
		status = INTEL_SIP_SMC_STATUS_BUSY;
	}

	intel_fpga_sdm_write_all();

	return status;
}
/*
 * This function is responsible for handling all SiP calls from the NS world.
 * Dispatches on the function id; unknown ids fall through to
 * socfpga_sip_handler(), which returns SMC_UNK.
 */
uintptr_t sip_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	uint32_t status = INTEL_SIP_SMC_STATUS_OK;
	uint32_t completed_addr[3];
	uint32_t count = 0;

	switch (smc_fid) {
	case SIP_SVC_UID:
		/* Return UID to the caller */
		SMC_UUID_RET(handle, intl_svc_uid);
		break;
	case INTEL_SIP_SMC_FPGA_CONFIG_ISDONE:
		status = intel_mailbox_fpga_config_isdone();
		SMC_RET4(handle, status, 0, 0, 0);
		break;
	case INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM:
		/* Report the staging window: base and its size. */
		SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
			INTEL_SIP_SMC_FPGA_CONFIG_ADDR,
			INTEL_SIP_SMC_FPGA_CONFIG_SIZE -
				INTEL_SIP_SMC_FPGA_CONFIG_ADDR);
		break;
	case INTEL_SIP_SMC_FPGA_CONFIG_START:
		status = intel_fpga_config_start(x1);
		SMC_RET4(handle, status, 0, 0, 0);
		break;
	case INTEL_SIP_SMC_FPGA_CONFIG_WRITE:
		status = intel_fpga_config_write(x1, x2);
		SMC_RET4(handle, status, 0, 0, 0);
		break;
	case INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE:
		status = intel_fpga_config_completed_write(completed_addr,
								&count);
		/* Return as many completed chunk addresses as harvested. */
		switch (count) {
		case 1:
			SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
				completed_addr[0], 0, 0);
			break;
		case 2:
			SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
				completed_addr[0],
				completed_addr[1], 0);
			break;
		case 3:
			SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
				completed_addr[0],
				completed_addr[1],
				completed_addr[2]);
			break;
		case 0:
			SMC_RET4(handle, status, 0, 0, 0);
			break;
		default:
			SMC_RET1(handle, INTEL_SIP_SMC_STATUS_ERROR);
		}
		break;
	default:
		return socfpga_sip_handler(smc_fid, x1, x2, x3, x4,
			cookie, handle, flags);
	}
}
/* Register the SiP handler for both fast and yielding SMC types so
 * either calling convention reaches sip_smc_handler(). */
DECLARE_RT_SVC(
	agilex_sip_svc,
	OEN_SIP_START,
	OEN_SIP_END,
	SMC_TYPE_FAST,
	NULL,
	sip_smc_handler
);

DECLARE_RT_SVC(
	agilex_sip_svc_std,
	OEN_SIP_START,
	OEN_SIP_END,
	SMC_TYPE_YIELD,
	NULL,
	sip_smc_handler
);
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2019, Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <common/tbbr/tbbr_img_def.h>
#include <drivers/io/io_block.h>
#include <drivers/io/io_driver.h>
#include <drivers/io/io_fip.h>
#include <drivers/io/io_memmap.h>
#include <drivers/io/io_storage.h>
#include <drivers/mmc.h>
#include <drivers/partition/partition.h>
#include <lib/mmio.h>
#include <tools_share/firmware_image_package.h>
#include "agilex_private.h"
/* FIP location defaults and per-boot-source staging windows. */
#define PLAT_FIP_BASE		(0)
#define PLAT_FIP_MAX_SIZE	(0x1000000)
#define PLAT_MMC_DATA_BASE	(0xffe3c000)
#define PLAT_MMC_DATA_SIZE	(0x2000)
#define PLAT_QSPI_DATA_BASE	(0x3C00000)
#define PLAT_QSPI_DATA_SIZE	(0x1000000)

/* IO device connectors/handles: the raw boot device and the FIP
 * packaged on top of it. */
static const io_dev_connector_t *fip_dev_con;
static const io_dev_connector_t *boot_dev_con;
static uintptr_t fip_dev_handle;
static uintptr_t boot_dev_handle;

/* UUIDs used to locate each image inside the FIP. */
static const io_uuid_spec_t bl2_uuid_spec = {
	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
};

static const io_uuid_spec_t bl31_uuid_spec = {
	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
};

static const io_uuid_spec_t bl33_uuid_spec = {
	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
};

/* GPT partition type string ("a2") holding the FIP on SD/MMC. */
uintptr_t a2_lba_offset;
const char a2[] = {0xa2, 0x0};

/* First block of the device, where the GPT header lives. */
static const io_block_spec_t gpt_block_spec = {
	.offset = 0,
	.length = MMC_BLOCK_SIZE
};

static int check_fip(const uintptr_t spec);
static int check_dev(const uintptr_t spec);

/* Filled in by socfpga_io_setup() according to the boot source. */
static io_block_dev_spec_t boot_dev_spec;
static int (*register_io_dev)(const io_dev_connector_t **);

static io_block_spec_t fip_spec = {
	.offset		= PLAT_FIP_BASE,
	.length		= PLAT_FIP_MAX_SIZE,
};

/* One policy per image id: which device, which spec, how to probe. */
struct plat_io_policy {
	uintptr_t *dev_handle;
	uintptr_t image_spec;
	int (*check)(const uintptr_t spec);
};
/* Image-id -> (device, spec, probe) table consumed by
 * plat_get_image_source(): the FIP and GPT come from the raw boot
 * device, everything else from inside the FIP. */
static const struct plat_io_policy policies[] = {
	[FIP_IMAGE_ID] = {
		&boot_dev_handle,
		(uintptr_t)&fip_spec,
		check_dev
	},
	[BL2_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl2_uuid_spec,
		check_fip
	},
	[BL31_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl31_uuid_spec,
		check_fip
	},
	[BL33_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t) &bl33_uuid_spec,
		check_fip
	},
	[GPT_IMAGE_ID] = {
		&boot_dev_handle,
		(uintptr_t) &gpt_block_spec,
		check_dev
	},
};
/*
 * Probe the raw boot device: initialise it, then verify the given spec
 * can be opened (and close it again).  Returns 0 on success or the
 * first failing io call's error code.
 */
static int check_dev(const uintptr_t spec)
{
	uintptr_t temp_handle;
	int rc;

	rc = io_dev_init(boot_dev_handle, (uintptr_t)NULL);
	if (rc != 0)
		return rc;

	rc = io_open(boot_dev_handle, spec, &temp_handle);
	if (rc == 0)
		io_close(temp_handle);

	return rc;
}
/*
 * Probe the FIP device: initialise it, then verify the given image
 * spec can be opened (and close it again).  Returns 0 on success or
 * the first failing io call's error code.
 */
static int check_fip(const uintptr_t spec)
{
	uintptr_t temp_handle;
	int rc;

	rc = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
	if (rc != 0)
		return rc;

	rc = io_open(fip_dev_handle, spec, &temp_handle);
	if (rc == 0)
		io_close(temp_handle);

	return rc;
}
/*
 * Set up the boot IO stack for the given boot source (SD/MMC or QSPI),
 * register the block/memmap and FIP drivers, open both devices and,
 * for SD/MMC, locate the FIP via the GPT "a2" partition.
 * NOTE(review): all error handling is via assert(), which compiles out
 * in release builds — confirm that is acceptable for this boot path.
 */
void socfpga_io_setup(int boot_source)
{
	int result;

	switch (boot_source) {
	case BOOT_SOURCE_SDMMC:
		register_io_dev = &register_io_dev_block;
		/* Block driver bounces through an on-chip staging buffer. */
		boot_dev_spec.buffer.offset	= PLAT_MMC_DATA_BASE;
		boot_dev_spec.buffer.length	= MMC_BLOCK_SIZE;
		boot_dev_spec.ops.read		= mmc_read_blocks;
		boot_dev_spec.ops.write		= mmc_write_blocks;
		boot_dev_spec.block_size	= MMC_BLOCK_SIZE;
		break;

	case BOOT_SOURCE_QSPI:
		register_io_dev = &register_io_dev_memmap;
		/* QSPI is memory mapped: offset the FIP into its window. */
		fip_spec.offset = fip_spec.offset + PLAT_QSPI_DATA_BASE;
		break;

	default:
		ERROR("Unsupported boot source\n");
		panic();
		break;
	}

	result = (*register_io_dev)(&boot_dev_con);
	assert(result == 0);

	result = register_io_dev_fip(&fip_dev_con);
	assert(result == 0);

	result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec,
			&boot_dev_handle);
	assert(result == 0);

	result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
	assert(result == 0);

	if (boot_source == BOOT_SOURCE_SDMMC) {
		/* The FIP lives in the GPT partition of type "a2". */
		partition_init(GPT_IMAGE_ID);
		fip_spec.offset = get_partition_entry(a2)->start;
	}

	/* Silence unused-variable warnings when asserts are compiled out. */
	(void)result;
}
/*
 * Resolve an image id to its device handle and spec via the policies
 * table, probing the policy first.  Asserts on unknown ids and failed
 * probes (debug builds only).
 */
int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
			uintptr_t *image_spec)
{
	int result;
	const struct plat_io_policy *policy;

	assert(image_id < ARRAY_SIZE(policies));

	policy = &policies[image_id];

	result = policy->check(policy->image_spec);
	assert(result == 0);

	*image_spec = policy->image_spec;
	*dev_handle = *(policy->dev_handle);

	return result;
}
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <platform_def.h>
#include <lib/psci/psci.h>
/* Power domain tree: one cluster containing four CPUs. */
static const unsigned char plat_power_domain_tree_desc[] = {1, 4};

/*******************************************************************************
 * This function returns the default topology tree information.
 ******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
	return plat_power_domain_tree_desc;
}
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform that allows the former to query the platform
* to convert an MPIDR to a unique linear index. An error code (-1) is returned
* in case the MPIDR is invalid.
******************************************************************************/
/*******************************************************************************
 * Convert an MPIDR to the platform's linear core index, part of the
 * critical interface between the PSCI generic layer and the platform.
 * Returns -1 when the MPIDR does not name a core present on this
 * platform.
 ******************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	unsigned int cluster, core;

	mpidr &= MPIDR_AFFINITY_MASK;

	/* Reject affinity bits outside the cluster/core fields. */
	if ((mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) != 0)
		return -1;

	cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
	core = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;

	/* Both indices must fall inside the platform topology. */
	if (cluster >= PLATFORM_CLUSTER_COUNT ||
	    core >= PLATFORM_MAX_CPUS_PER_CLUSTER)
		return -1;

	/* Linear index: four cores per cluster. */
	return (int)(core + (cluster * 4));
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment