Unverified Commit bc5e79cd authored by Antonio Niño Díaz, committed by GitHub

Merge pull request #1776 from vwadekar/tf2.0-tegra-downstream-rebase-1.22.19

Tf2.0 tegra downstream rebase 1.22.19
parents ae478c26 8ec45621
...@@ -185,6 +185,7 @@ DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
DEFINE_SYSREG_READ_FUNC(id_afr0_el1)
DEFINE_SYSREG_READ_FUNC(CurrentEl)
DEFINE_SYSREG_READ_FUNC(ctr_el0)
DEFINE_SYSREG_RW_FUNCS(daif)
......
...@@ -125,7 +125,7 @@ int tegra_bpmp_init(void)
	val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
	if (val != SIGN_OF_LIFE) {
		ERROR("BPMP processor not available\n");
		ret = -ENOTSUP;
		return -ENOTSUP;
	}
	/* check if clock for the atomics block is enabled */
...@@ -158,7 +158,6 @@ int tegra_bpmp_init(void)
	}
	/* mark state as "initialized" */
	if (ret == 0)
		bpmp_init_state = BPMP_INIT_COMPLETE;
	/* the channel values have to be visible across all cpus */
......
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <bpmp_ipc.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <stdbool.h>
#include <string.h>
#include <tegra_def.h>
#include <utils_def.h>
#include "intf.h"
#include "ivc.h"
/**
* Holds IVC channel data
*/
struct ccplex_bpmp_channel_data {
/* Buffer for incoming data */
struct frame_data *ib;
/* Buffer for outgoing data */
struct frame_data *ob;
};
static struct ccplex_bpmp_channel_data s_channel;
static struct ivc ivc_ccplex_bpmp_channel;
/*
* Helper functions to access the HSP doorbell registers
*/
static inline uint32_t hsp_db_read(uint32_t reg)
{
return mmio_read_32((uint32_t)(TEGRA_HSP_DBELL_BASE + reg));
}
static inline void hsp_db_write(uint32_t reg, uint32_t val)
{
mmio_write_32((uint32_t)(TEGRA_HSP_DBELL_BASE + reg), val);
}
/*******************************************************************************
* IVC wrappers for CCPLEX <-> BPMP communication.
******************************************************************************/
static void tegra_bpmp_ring_bpmp_doorbell(void);
/*
* Get the next frame where data can be written.
*/
static struct frame_data *tegra_bpmp_get_next_out_frame(void)
{
struct frame_data *frame;
const struct ivc *ch = &ivc_ccplex_bpmp_channel;
frame = (struct frame_data *)tegra_ivc_write_get_next_frame(ch);
if (frame == NULL) {
ERROR("%s: Error in getting next frame, exiting\n", __func__);
} else {
s_channel.ob = frame;
}
return frame;
}
static void tegra_bpmp_signal_slave(void)
{
(void)tegra_ivc_write_advance(&ivc_ccplex_bpmp_channel);
tegra_bpmp_ring_bpmp_doorbell();
}
static int32_t tegra_bpmp_free_master(void)
{
return tegra_ivc_read_advance(&ivc_ccplex_bpmp_channel);
}
static bool tegra_bpmp_slave_acked(void)
{
struct frame_data *frame;
bool ret = true;
frame = (struct frame_data *)tegra_ivc_read_get_next_frame(&ivc_ccplex_bpmp_channel);
if (frame == NULL) {
ret = false;
} else {
s_channel.ib = frame;
}
return ret;
}
static struct frame_data *tegra_bpmp_get_cur_in_frame(void)
{
return s_channel.ib;
}
/*
* Enables BPMP to ring CCPlex doorbell
*/
static void tegra_bpmp_enable_ccplex_doorbell(void)
{
uint32_t reg;
reg = hsp_db_read(HSP_DBELL_1_ENABLE);
reg |= HSP_MASTER_BPMP_BIT;
hsp_db_write(HSP_DBELL_1_ENABLE, reg);
}
/*
* CCPlex rings the BPMP doorbell
*/
static void tegra_bpmp_ring_bpmp_doorbell(void)
{
/*
 * Any write to this register has the same effect: the hardware uses the
 * master ID of the write transaction and sets the corresponding flag.
*/
hsp_db_write(HSP_DBELL_3_TRIGGER, HSP_MASTER_CCPLEX_BIT);
}
/*
 * Returns true if the CCPLEX can ring the BPMP doorbell, false otherwise.
 * This also signals that the BPMP is up and ready.
*/
static bool tegra_bpmp_can_ccplex_ring_doorbell(void)
{
uint32_t reg;
/* check if ccplex can communicate with bpmp */
reg = hsp_db_read(HSP_DBELL_3_ENABLE);
return ((reg & HSP_MASTER_CCPLEX_BIT) != 0U);
}
static int32_t tegra_bpmp_wait_for_slave_ack(void)
{
uint32_t timeout = TIMEOUT_RESPONSE_FROM_BPMP_US;
while (!tegra_bpmp_slave_acked() && (timeout != 0U)) {
udelay(1);
timeout--;
	}
return ((timeout == 0U) ? -ETIMEDOUT : 0);
}
/*
* Notification from the ivc layer
*/
static void tegra_bpmp_ivc_notify(const struct ivc *ivc)
{
(void)(ivc);
tegra_bpmp_ring_bpmp_doorbell();
}
/*
* Atomic send/receive API, which means it waits until slave acks
*/
static int32_t tegra_bpmp_ipc_send_req_atomic(uint32_t mrq, void *p_out,
uint32_t size_out, void *p_in, uint32_t size_in)
{
struct frame_data *frame = tegra_bpmp_get_next_out_frame();
const struct frame_data *f_in = NULL;
int32_t ret = 0;
void *p_fdata;
if ((p_out == NULL) || (size_out > IVC_DATA_SZ_BYTES) ||
(frame == NULL)) {
ERROR("%s: invalid parameters, exiting\n", __func__);
ret = -EINVAL;
}
if (ret == 0) {
/* prepare the command frame */
frame->mrq = mrq;
frame->flags = FLAG_DO_ACK;
p_fdata = frame->data;
(void)memcpy(p_fdata, p_out, (size_t)size_out);
/* signal the slave */
tegra_bpmp_signal_slave();
/* wait for slave to ack */
ret = tegra_bpmp_wait_for_slave_ack();
if (ret != 0) {
ERROR("failed waiting for the slave to ack\n");
}
/* retrieve the response frame */
if ((size_in <= IVC_DATA_SZ_BYTES) && (p_in != NULL) &&
(ret == 0)) {
f_in = tegra_bpmp_get_cur_in_frame();
			if (f_in == NULL) {
				ERROR("Failed to get next input frame!\n");
			} else {
				/* copy the response payload back to the caller */
				(void)memcpy(p_in, f_in->data, (size_t)size_in);
			}
}
if (ret == 0) {
ret = tegra_bpmp_free_master();
if (ret != 0) {
ERROR("Failed to free master\n");
}
}
}
return ret;
}
/*
* Initializes the BPMP<--->CCPlex communication path.
*/
int32_t tegra_bpmp_ipc_init(void)
{
size_t msg_size;
uint32_t frame_size, timeout;
int32_t error = 0;
/* allow bpmp to ring CCPLEX's doorbell */
tegra_bpmp_enable_ccplex_doorbell();
/* wait for BPMP to actually ring the doorbell */
timeout = TIMEOUT_RESPONSE_FROM_BPMP_US;
while ((timeout != 0U) && !tegra_bpmp_can_ccplex_ring_doorbell()) {
udelay(1); /* bpmp turn-around time */
timeout--;
}
if (timeout == 0U) {
ERROR("%s: BPMP firmware is not ready\n", __func__);
return -ENOTSUP;
}
INFO("%s: BPMP handshake completed\n", __func__);
msg_size = tegra_ivc_align(IVC_CMD_SZ_BYTES);
frame_size = (uint32_t)tegra_ivc_total_queue_size(msg_size);
if (frame_size > TEGRA_BPMP_IPC_CH_MAP_SIZE) {
ERROR("%s: carveout size is not sufficient\n", __func__);
return -EINVAL;
}
error = tegra_ivc_init(&ivc_ccplex_bpmp_channel,
(uint32_t)TEGRA_BPMP_IPC_RX_PHYS_BASE,
(uint32_t)TEGRA_BPMP_IPC_TX_PHYS_BASE,
1U, frame_size, tegra_bpmp_ivc_notify);
if (error != 0) {
ERROR("%s: IVC init failed (%d)\n", __func__, error);
} else {
/* reset channel */
tegra_ivc_channel_reset(&ivc_ccplex_bpmp_channel);
/* wait for notification from BPMP */
while (tegra_ivc_channel_notified(&ivc_ccplex_bpmp_channel) != 0) {
/*
* Interrupt BPMP with doorbell each time after
* tegra_ivc_channel_notified() returns non zero
* value.
*/
tegra_bpmp_ring_bpmp_doorbell();
}
INFO("%s: All communication channels initialized\n", __func__);
}
return error;
}
/* Handler to reset a hardware module */
int32_t tegra_bpmp_ipc_reset_module(uint32_t rst_id)
{
int32_t ret;
struct mrq_reset_request req = {
.cmd = (uint32_t)CMD_RESET_MODULE,
.reset_id = rst_id
};
/* only GPCDMA/XUSB_PADCTL resets are supported */
assert((rst_id == TEGRA_RESET_ID_XUSB_PADCTL) ||
(rst_id == TEGRA_RESET_ID_GPCDMA));
ret = tegra_bpmp_ipc_send_req_atomic(MRQ_RESET, &req,
(uint32_t)sizeof(req), NULL, 0);
if (ret != 0) {
ERROR("%s: failed for module %d with error %d\n", __func__,
rst_id, ret);
}
return ret;
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef INTF_H
#define INTF_H
/**
* Flags used in IPC req
*/
#define FLAG_DO_ACK (U(1) << 0)
#define FLAG_RING_DOORBELL (U(1) << 1)
/* Bit 1 is designated for CCPlex in secure world */
#define HSP_MASTER_CCPLEX_BIT (U(1) << 1)
/* Bit 19 is designated for BPMP in non-secure world */
#define HSP_MASTER_BPMP_BIT (U(1) << 19)
/* Timeout to receive response from BPMP is 1 sec */
#define TIMEOUT_RESPONSE_FROM_BPMP_US U(1000000) /* in microseconds */
/**
* IVC protocol defines and command/response frame
*/
/**
* IVC specific defines
*/
#define IVC_CMD_SZ_BYTES U(128)
#define IVC_DATA_SZ_BYTES U(120)
/**
* Holds frame data for an IPC request
*/
struct frame_data {
/* Identification as to what kind of data is being transmitted */
uint32_t mrq;
/* Flags for slave as to how to respond back */
uint32_t flags;
/* Actual data being sent */
uint8_t data[IVC_DATA_SZ_BYTES];
};
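/*
 * Layout note (illustrative sketch, assuming a C11 toolchain): the 128-byte
 * command frame is 4 bytes of 'mrq' + 4 bytes of 'flags' + 120 bytes of
 * payload, so the structure above spans exactly IVC_CMD_SZ_BYTES.
 */
_Static_assert(sizeof(struct frame_data) == IVC_CMD_SZ_BYTES,
	       "frame_data must span exactly one IVC command frame");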
/**
 * Commands sent to the BPMP firmware
*/
/**
* MRQ code to issue a module reset command to BPMP
*/
#define MRQ_RESET U(20)
/**
* Reset sub-commands
*/
#define CMD_RESET_ASSERT U(1)
#define CMD_RESET_DEASSERT U(2)
#define CMD_RESET_MODULE U(3)
/**
* Used by the sender of an #MRQ_RESET message to request BPMP to
* assert or deassert a given reset line.
*/
struct __attribute__((packed)) mrq_reset_request {
/* reset action to perform (mrq_reset_commands) */
uint32_t cmd;
	/* id of the reset to be affected */
uint32_t reset_id;
};
#endif /* INTF_H */
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include "ivc.h"
/*
* IVC channel reset protocol.
*
* Each end uses its tx_channel.state to indicate its synchronization state.
*/
enum {
/*
* This value is zero for backwards compatibility with services that
* assume channels to be initially zeroed. Such channels are in an
* initially valid state, but cannot be asynchronously reset, and must
* maintain a valid state at all times.
*
* The transmitting end can enter the established state from the sync or
* ack state when it observes the receiving endpoint in the ack or
 * established state, indicating that it has cleared the counters in our
* rx_channel.
*/
ivc_state_established = U(0),
/*
* If an endpoint is observed in the sync state, the remote endpoint is
* allowed to clear the counters it owns asynchronously with respect to
* the current endpoint. Therefore, the current endpoint is no longer
* allowed to communicate.
*/
ivc_state_sync = U(1),
/*
* When the transmitting end observes the receiving end in the sync
* state, it can clear the w_count and r_count and transition to the ack
* state. If the remote endpoint observes us in the ack state, it can
* return to the established state once it has cleared its counters.
*/
ivc_state_ack = U(2)
};
/*
* This structure is divided into two-cache aligned parts, the first is only
* written through the tx_channel pointer, while the second is only written
* through the rx_channel pointer. This delineates ownership of the cache lines,
* which is critical to performance and necessary in non-cache coherent
* implementations.
*/
struct ivc_channel_header {
struct {
/* fields owned by the transmitting end */
uint32_t w_count;
uint32_t state;
uint32_t w_rsvd[IVC_CHHDR_TX_FIELDS - 2];
};
struct {
/* fields owned by the receiving end */
uint32_t r_count;
uint32_t r_rsvd[IVC_CHHDR_RX_FIELDS - 1];
};
};
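/*
 * Illustrative check (a sketch, assuming a C11 toolchain; not part of the
 * original driver): with IVC_CHHDR_TX_FIELDS == IVC_CHHDR_RX_FIELDS == 16
 * and 4-byte fields, each half of the header fills exactly one 64-byte
 * (IVC_ALIGN) cache line, so the transmitter and the receiver never write
 * to the same line.
 */
_Static_assert((IVC_CHHDR_TX_FIELDS * sizeof(uint32_t)) == IVC_ALIGN,
	       "each header half must fill one IVC_ALIGN cache line");
_Static_assert(sizeof(struct ivc_channel_header) == (2U * IVC_ALIGN),
	       "channel header must occupy exactly two cache lines");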
static inline bool ivc_channel_empty(const struct ivc *ivc,
volatile const struct ivc_channel_header *ch)
{
/*
* This function performs multiple checks on the same values with
* security implications, so sample the counters' current values in
* shared memory to ensure that these checks use the same values.
*/
uint32_t wr_count = ch->w_count;
uint32_t rd_count = ch->r_count;
bool ret = false;
(void)ivc;
/*
* Perform an over-full check to prevent denial of service attacks where
* a server could be easily fooled into believing that there's an
* extremely large number of frames ready, since receivers are not
* expected to check for full or over-full conditions.
*
* Although the channel isn't empty, this is an invalid case caused by
* a potentially malicious peer, so returning empty is safer, because it
* gives the impression that the channel has gone silent.
*/
if (((wr_count - rd_count) > ivc->nframes) || (wr_count == rd_count)) {
ret = true;
}
return ret;
}
static inline bool ivc_channel_full(const struct ivc *ivc,
volatile const struct ivc_channel_header *ch)
{
uint32_t wr_count = ch->w_count;
uint32_t rd_count = ch->r_count;
(void)ivc;
/*
* Invalid cases where the counters indicate that the queue is over
* capacity also appear full.
*/
return ((wr_count - rd_count) >= ivc->nframes);
}
static inline uint32_t ivc_channel_avail_count(const struct ivc *ivc,
volatile const struct ivc_channel_header *ch)
{
uint32_t wr_count = ch->w_count;
uint32_t rd_count = ch->r_count;
(void)ivc;
/*
* This function isn't expected to be used in scenarios where an
* over-full situation can lead to denial of service attacks. See the
* comment in ivc_channel_empty() for an explanation about special
* over-full considerations.
*/
return (wr_count - rd_count);
}
static inline void ivc_advance_tx(struct ivc *ivc)
{
ivc->tx_channel->w_count++;
if (ivc->w_pos == (ivc->nframes - (uint32_t)1U)) {
ivc->w_pos = 0U;
} else {
ivc->w_pos++;
}
}
static inline void ivc_advance_rx(struct ivc *ivc)
{
ivc->rx_channel->r_count++;
if (ivc->r_pos == (ivc->nframes - (uint32_t)1U)) {
ivc->r_pos = 0U;
} else {
ivc->r_pos++;
}
}
static inline int32_t ivc_check_read(const struct ivc *ivc)
{
/*
* tx_channel->state is set locally, so it is not synchronized with
* state from the remote peer. The remote peer cannot reset its
* transmit counters until we've acknowledged its synchronization
* request, so no additional synchronization is required because an
* asynchronous transition of rx_channel->state to ivc_state_ack is not
* allowed.
*/
if (ivc->tx_channel->state != ivc_state_established) {
return -ECONNRESET;
}
/*
* Avoid unnecessary invalidations when performing repeated accesses to
* an IVC channel by checking the old queue pointers first.
* Synchronization is only necessary when these pointers indicate empty
* or full.
*/
if (!ivc_channel_empty(ivc, ivc->rx_channel)) {
return 0;
}
return ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}
static inline int32_t ivc_check_write(const struct ivc *ivc)
{
if (ivc->tx_channel->state != ivc_state_established) {
return -ECONNRESET;
}
if (!ivc_channel_full(ivc, ivc->tx_channel)) {
return 0;
}
return ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}
bool tegra_ivc_can_read(const struct ivc *ivc)
{
return ivc_check_read(ivc) == 0;
}
bool tegra_ivc_can_write(const struct ivc *ivc)
{
return ivc_check_write(ivc) == 0;
}
bool tegra_ivc_tx_empty(const struct ivc *ivc)
{
return ivc_channel_empty(ivc, ivc->tx_channel);
}
static inline uintptr_t calc_frame_offset(uint32_t frame_index,
uint32_t frame_size, uint32_t frame_offset)
{
return ((uintptr_t)frame_index * (uintptr_t)frame_size) +
(uintptr_t)frame_offset;
}
static void *ivc_frame_pointer(const struct ivc *ivc,
volatile const struct ivc_channel_header *ch,
uint32_t frame)
{
assert(frame < ivc->nframes);
return (void *)((uintptr_t)(&ch[1]) +
calc_frame_offset(frame, ivc->frame_size, 0));
}
int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read)
{
const void *src;
int32_t result;
if (buf == NULL) {
return -EINVAL;
}
if (max_read > ivc->frame_size) {
return -E2BIG;
}
result = ivc_check_read(ivc);
if (result != 0) {
return result;
}
/*
* Order observation of w_pos potentially indicating new data before
* data read.
*/
dmbish();
src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
(void)memcpy(buf, src, max_read);
ivc_advance_rx(ivc);
/*
* Ensure our write to r_pos occurs before our read from w_pos.
*/
dmbish();
/*
* Notify only upon transition from full to non-full.
* The available count can only asynchronously increase, so the
* worst possible side-effect will be a spurious notification.
*/
if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
ivc->notify(ivc);
}
return (int32_t)max_read;
}
/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(const struct ivc *ivc)
{
if (ivc_check_read(ivc) != 0) {
return NULL;
}
/*
* Order observation of w_pos potentially indicating new data before
* data read.
*/
dmbld();
return ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
}
int32_t tegra_ivc_read_advance(struct ivc *ivc)
{
/*
* No read barriers or synchronization here: the caller is expected to
* have already observed the channel non-empty. This check is just to
* catch programming errors.
*/
int32_t result = ivc_check_read(ivc);
if (result != 0) {
return result;
}
ivc_advance_rx(ivc);
/*
* Ensure our write to r_pos occurs before our read from w_pos.
*/
dmbish();
/*
* Notify only upon transition from full to non-full.
* The available count can only asynchronously increase, so the
* worst possible side-effect will be a spurious notification.
*/
if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
ivc->notify(ivc);
}
return 0;
}
int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size)
{
void *p;
int32_t result;
if ((buf == NULL) || (ivc == NULL)) {
return -EINVAL;
}
if (size > ivc->frame_size) {
return -E2BIG;
}
result = ivc_check_write(ivc);
if (result != 0) {
return result;
}
p = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
(void)memset(p, 0, ivc->frame_size);
(void)memcpy(p, buf, size);
/*
* Ensure that updated data is visible before the w_pos counter
* indicates that it is ready.
*/
dmbst();
ivc_advance_tx(ivc);
/*
* Ensure our write to w_pos occurs before our read from r_pos.
*/
dmbish();
/*
* Notify only upon transition from empty to non-empty.
* The available count can only asynchronously decrease, so the
* worst possible side-effect will be a spurious notification.
*/
if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1U) {
ivc->notify(ivc);
}
return (int32_t)size;
}
/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(const struct ivc *ivc)
{
if (ivc_check_write(ivc) != 0) {
return NULL;
}
return ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
}
/* advance the tx buffer */
int32_t tegra_ivc_write_advance(struct ivc *ivc)
{
int32_t result = ivc_check_write(ivc);
if (result != 0) {
return result;
}
/*
* Order any possible stores to the frame before update of w_pos.
*/
dmbst();
ivc_advance_tx(ivc);
/*
* Ensure our write to w_pos occurs before our read from r_pos.
*/
dmbish();
/*
* Notify only upon transition from empty to non-empty.
* The available count can only asynchronously decrease, so the
* worst possible side-effect will be a spurious notification.
*/
if (ivc_channel_avail_count(ivc, ivc->tx_channel) == (uint32_t)1U) {
ivc->notify(ivc);
}
return 0;
}
void tegra_ivc_channel_reset(const struct ivc *ivc)
{
ivc->tx_channel->state = ivc_state_sync;
ivc->notify(ivc);
}
/*
* ===============================================================
* IVC State Transition Table - see tegra_ivc_channel_notified()
* ===============================================================
*
* local remote action
* ----- ------ -----------------------------------
* SYNC EST <none>
* SYNC ACK reset counters; move to EST; notify
* SYNC SYNC reset counters; move to ACK; notify
* ACK EST move to EST; notify
* ACK ACK move to EST; notify
* ACK SYNC reset counters; move to ACK; notify
* EST EST <none>
* EST ACK <none>
* EST SYNC reset counters; move to ACK; notify
*
* ===============================================================
*/
int32_t tegra_ivc_channel_notified(struct ivc *ivc)
{
uint32_t peer_state;
/* Copy the receiver's state out of shared memory. */
peer_state = ivc->rx_channel->state;
if (peer_state == (uint32_t)ivc_state_sync) {
/*
* Order observation of ivc_state_sync before stores clearing
* tx_channel.
*/
dmbld();
/*
* Reset tx_channel counters. The remote end is in the SYNC
* state and won't make progress until we change our state,
* so the counters are not in use at this time.
*/
ivc->tx_channel->w_count = 0U;
ivc->rx_channel->r_count = 0U;
ivc->w_pos = 0U;
ivc->r_pos = 0U;
/*
* Ensure that counters appear cleared before new state can be
* observed.
*/
dmbst();
/*
* Move to ACK state. We have just cleared our counters, so it
* is now safe for the remote end to start using these values.
*/
ivc->tx_channel->state = ivc_state_ack;
/*
* Notify remote end to observe state transition.
*/
ivc->notify(ivc);
} else if ((ivc->tx_channel->state == (uint32_t)ivc_state_sync) &&
(peer_state == (uint32_t)ivc_state_ack)) {
/*
* Order observation of ivc_state_sync before stores clearing
* tx_channel.
*/
dmbld();
/*
* Reset tx_channel counters. The remote end is in the ACK
* state and won't make progress until we change our state,
* so the counters are not in use at this time.
*/
ivc->tx_channel->w_count = 0U;
ivc->rx_channel->r_count = 0U;
ivc->w_pos = 0U;
ivc->r_pos = 0U;
/*
* Ensure that counters appear cleared before new state can be
* observed.
*/
dmbst();
/*
* Move to ESTABLISHED state. We know that the remote end has
* already cleared its counters, so it is safe to start
* writing/reading on this channel.
*/
ivc->tx_channel->state = ivc_state_established;
/*
* Notify remote end to observe state transition.
*/
ivc->notify(ivc);
} else if (ivc->tx_channel->state == (uint32_t)ivc_state_ack) {
/*
* At this point, we have observed the peer to be in either
* the ACK or ESTABLISHED state. Next, order observation of
* peer state before storing to tx_channel.
*/
dmbld();
/*
* Move to ESTABLISHED state. We know that we have previously
* cleared our counters, and we know that the remote end has
* cleared its counters, so it is safe to start writing/reading
* on this channel.
*/
ivc->tx_channel->state = ivc_state_established;
/*
* Notify remote end to observe state transition.
*/
ivc->notify(ivc);
} else {
/*
* There is no need to handle any further action. Either the
* channel is already fully established, or we are waiting for
* the remote end to catch up with our current state. Refer
* to the diagram in "IVC State Transition Table" above.
*/
}
return ((ivc->tx_channel->state == (uint32_t)ivc_state_established) ? 0 : -EAGAIN);
}
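/*
 * Usage sketch (illustration only; it mirrors what the BPMP IPC driver
 * earlier in this change does after tegra_ivc_init()): one endpoint starts
 * the reset protocol and then polls tegra_ivc_channel_notified() until the
 * channel reaches the ESTABLISHED state, kicking the peer after every
 * attempt that still returns -EAGAIN. The "ring_peer_doorbell" callback is
 * a hypothetical notify hook supplied by the caller.
 */
static inline void tegra_ivc_establish_sketch(struct ivc *ivc,
					      void (*ring_peer_doorbell)(void))
{
	tegra_ivc_channel_reset(ivc);

	while (tegra_ivc_channel_notified(ivc) != 0) {
		ring_peer_doorbell();
	}
}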
size_t tegra_ivc_align(size_t size)
{
return (size + (IVC_ALIGN - 1U)) & ~(IVC_ALIGN - 1U);
}
size_t tegra_ivc_total_queue_size(size_t queue_size)
{
if ((queue_size & (IVC_ALIGN - 1U)) != 0U) {
ERROR("queue_size (%d) must be %d-byte aligned\n",
(int32_t)queue_size, IVC_ALIGN);
return 0;
}
return queue_size + sizeof(struct ivc_channel_header);
}
static int32_t check_ivc_params(uintptr_t queue_base1, uintptr_t queue_base2,
uint32_t nframes, uint32_t frame_size)
{
assert((offsetof(struct ivc_channel_header, w_count)
& (IVC_ALIGN - 1U)) == 0U);
assert((offsetof(struct ivc_channel_header, r_count)
& (IVC_ALIGN - 1U)) == 0U);
assert((sizeof(struct ivc_channel_header) & (IVC_ALIGN - 1U)) == 0U);
if (((uint64_t)nframes * (uint64_t)frame_size) >= 0x100000000ULL) {
ERROR("nframes * frame_size overflows\n");
return -EINVAL;
}
/*
* The headers must at least be aligned enough for counters
* to be accessed atomically.
*/
if ((queue_base1 & (IVC_ALIGN - 1U)) != 0U) {
ERROR("ivc channel start not aligned: %lx\n", queue_base1);
return -EINVAL;
}
if ((queue_base2 & (IVC_ALIGN - 1U)) != 0U) {
ERROR("ivc channel start not aligned: %lx\n", queue_base2);
return -EINVAL;
}
if ((frame_size & (IVC_ALIGN - 1U)) != 0U) {
ERROR("frame size not adequately aligned: %u\n",
frame_size);
return -EINVAL;
}
if (queue_base1 < queue_base2) {
if ((queue_base1 + ((uint64_t)frame_size * nframes)) > queue_base2) {
ERROR("queue regions overlap: %lx + %x, %x\n",
queue_base1, frame_size,
frame_size * nframes);
return -EINVAL;
}
} else {
if ((queue_base2 + ((uint64_t)frame_size * nframes)) > queue_base1) {
ERROR("queue regions overlap: %lx + %x, %x\n",
queue_base2, frame_size,
frame_size * nframes);
return -EINVAL;
}
}
return 0;
}
int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
uint32_t nframes, uint32_t frame_size,
ivc_notify_function notify)
{
int32_t result;
/* sanity check input params */
if ((ivc == NULL) || (notify == NULL)) {
return -EINVAL;
}
result = check_ivc_params(rx_base, tx_base, nframes, frame_size);
if (result != 0) {
return result;
}
/*
* All sizes that can be returned by communication functions should
* fit in a 32-bit integer.
*/
if (frame_size > (1u << 31)) {
return -E2BIG;
}
ivc->rx_channel = (struct ivc_channel_header *)rx_base;
ivc->tx_channel = (struct ivc_channel_header *)tx_base;
ivc->notify = notify;
ivc->frame_size = frame_size;
ivc->nframes = nframes;
ivc->w_pos = 0U;
ivc->r_pos = 0U;
INFO("%s: done\n", __func__);
return 0;
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef IVC_H
#define IVC_H
#include <stdint.h>
#include <stddef.h>
#include <utils_def.h>
#define IVC_ALIGN U(64)
#define IVC_CHHDR_TX_FIELDS U(16)
#define IVC_CHHDR_RX_FIELDS U(16)
struct ivc;
struct ivc_channel_header;
/* callback handler for notify on receiving a response */
typedef void (* ivc_notify_function)(const struct ivc *);
struct ivc {
struct ivc_channel_header *rx_channel;
struct ivc_channel_header *tx_channel;
uint32_t w_pos;
uint32_t r_pos;
ivc_notify_function notify;
uint32_t nframes;
uint32_t frame_size;
};
int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
uint32_t nframes, uint32_t frame_size,
ivc_notify_function notify);
size_t tegra_ivc_total_queue_size(size_t queue_size);
size_t tegra_ivc_align(size_t size);
int32_t tegra_ivc_channel_notified(struct ivc *ivc);
void tegra_ivc_channel_reset(const struct ivc *ivc);
int32_t tegra_ivc_write_advance(struct ivc *ivc);
void *tegra_ivc_write_get_next_frame(const struct ivc *ivc);
int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size);
int32_t tegra_ivc_read_advance(struct ivc *ivc);
void *tegra_ivc_read_get_next_frame(const struct ivc *ivc);
int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read);
bool tegra_ivc_tx_empty(const struct ivc *ivc);
bool tegra_ivc_can_write(const struct ivc *ivc);
bool tegra_ivc_can_read(const struct ivc *ivc);
#endif /* IVC_H */
...@@ -25,316 +25,15 @@ ...@@ -25,316 +25,15 @@
static uint64_t video_mem_base; static uint64_t video_mem_base;
static uint64_t video_mem_size_mb; static uint64_t video_mem_size_mb;
static void tegra_memctrl_reconfig_mss_clients(void)
{
#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
	uint32_t val, wdata_0, wdata_1;
/*
* Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
* boot and strongly ordered MSS clients to flush existing memory
* traffic and stall future requests.
*/
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);
wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
#if ENABLE_AFI_DEVICE
MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
#endif
MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
} while ((val & wdata_0) != wdata_0);
/* Wait one more time due to SW WAR for known legacy issue */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
} while ((val & wdata_0) != wdata_0);
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);
wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
} while ((val & wdata_1) != wdata_1);
/* Wait one more time due to SW WAR for known legacy issue */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
} while ((val & wdata_1) != wdata_1);
/*
* Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
* strongly ordered MSS clients. ROC needs to be single point
* of control on overriding the memory type. So, remove TSA's
* memtype override.
*
* MC clients with default SO_DEV override still enabled at TSA:
* AONW, BPMPW, SCEW, APEW
*/
#if ENABLE_AFI_DEVICE
mc_set_tsa_passthrough(AFIW);
#endif
mc_set_tsa_passthrough(HDAW);
mc_set_tsa_passthrough(SATAW);
mc_set_tsa_passthrough(XUSB_HOSTW);
mc_set_tsa_passthrough(XUSB_DEVW);
mc_set_tsa_passthrough(SDMMCWAB);
mc_set_tsa_passthrough(APEDMAW);
mc_set_tsa_passthrough(SESWR);
mc_set_tsa_passthrough(ETRW);
mc_set_tsa_passthrough(AXISW);
mc_set_tsa_passthrough(EQOSW);
mc_set_tsa_passthrough(UFSHCW);
mc_set_tsa_passthrough(BPMPDMAW);
mc_set_tsa_passthrough(AONDMAW);
mc_set_tsa_passthrough(SCEDMAW);
/* Parker has no IO Coherency support and need the following:
* Ordered MC Clients on Parker are AFI, EQOS, SATA, XUSB.
* ISO clients(DISP, VI, EQOS) should never snoop caches and
* don't need ROC/PCFIFO ordering.
* ISO clients(EQOS) that need ordering should use PCFIFO ordering
* and bypass ROC ordering by using FORCE_NON_COHERENT path.
* FORCE_NON_COHERENT/FORCE_COHERENT config take precedence
* over SMMU attributes.
* Force all Normal memory transactions from ISO and non-ISO to be
* non-coherent(bypass ROC, avoid cache snoop to avoid perf hit).
* Force the SO_DEV transactions from ordered ISO clients(EQOS) to
* non-coherent path and enable MC PCFIFO interlock for ordering.
* Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
* XUSB, SATA) to coherent so that the transactions are
* ordered by ROC.
* PCFIFO ensure write ordering.
* Read after Write ordering is maintained/enforced by MC clients.
* Clients that need PCIe type write ordering must
* go through ROC ordering.
* Ordering enable for Read clients is not necessary.
* R5's and A9 would get necessary ordering from AXI and
* don't need ROC ordering enable:
* - MMIO ordering is through dev mapping and MMIO
* accesses bypass SMMU.
* - Normal memory is accessed through SMMU and ordering is
* ensured by client and AXI.
* - Ack point for Normal memory is WCAM in MC.
* - MMIO's can be early acked and AXI ensures dev memory ordering,
* Client ensures read/write direction change ordering.
* - See Bug 200312466 for more details.
*
* CGID_TAG_ADR is only present from T186 A02. As this code is common
* between A01 and A02, tegra_memctrl_set_overrides() programs
* CGID_TAG_ADR for the necessary clients on A02.
*/
mc_set_txn_override(HDAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(BPMPW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(PTCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVDISPLAYR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(EQOSW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVJPGSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(ISPRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCWAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(VICSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(MPCOREW, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(GPUSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AXISR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SCEDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(EQOSR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
/* See bug 200131110 comment #35*/
mc_set_txn_override(APEDMAR, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVENCSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCRAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(VICSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(BPMPDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(VIW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCRAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AXISW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(XUSB_DEVR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(UFSHCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(TSECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(GPUSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SATAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(XUSB_HOSTW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
mc_set_txn_override(TSECSWRB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(GPUSRD2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SCEDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(GPUSWR2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AONDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
/* See bug 200131110 comment #35*/
mc_set_txn_override(APEDMAW, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AONW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(HOST1XDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(ETRR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SESWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVJPGSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVDECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(TSECSRDB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(BPMPDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(APER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVDECSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(XUSB_HOSTR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(ISPWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SESRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SCER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AONR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(MPCORER, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
mc_set_txn_override(SDMMCWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(HDAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVDECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(UFSHCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(AONDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SATAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
mc_set_txn_override(ETRW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(VICSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVENCSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
/* See bug 200131110 comment #35 */
mc_set_txn_override(AFIR, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCWAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(NVDISPLAYR1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(ISPWB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(BPMPR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(APEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(SDMMCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
mc_set_txn_override(XUSB_DEVW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
/*
* See bug 200131110 comment #35 - there are no normal requests
* and AWID for SO/DEV requests is hardcoded in RTL for a
* particular PCIE controller
*/
mc_set_txn_override(AFIW, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_COHERENT);
mc_set_txn_override(SCEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
/*
* At this point, ordering can occur at ROC. So, remove PCFIFO's
* control over ordering requests.
*
* Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
* boot and strongly ordered MSS clients
*/
val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
#endif
mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);
val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);
val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
/* EQOSW is the only client that has PCFIFO order enabled. */
val |= mc_set_pcfifo_ordered_boot_so_mss(4, EQOSW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);
val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);
/*
* Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
* clients to allow memory traffic from all clients to start passing
* through ROC
	 */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == wdata_0);
	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == wdata_1);
	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
#endif
}
static void tegra_memctrl_set_overrides(void)
{
	const tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
const mc_txn_override_cfg_t *mc_txn_override_cfgs;
uint32_t num_txn_override_cfgs;
uint32_t i, val;
/* Get the settings from the platform */
assert(plat_mc_settings != NULL);
mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;
/*
* Set the MC_TXN_OVERRIDE registers for write clients.
*/
if ((tegra_chipid_is_t186()) &&
(!tegra_platform_is_silicon() ||
(tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1U)))) {
/*
* GPU and NVENC settings for Tegra186 simulation and
* Silicon rev. A01
*/
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
} else {
/*
* Settings for Tegra186 silicon rev. A02 and onwards.
*/
for (i = 0; i < num_txn_override_cfgs; i++) {
val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
val | mc_txn_override_cfgs[i].cgid_tag);
}
}
}

/*
 * The following platform setup functions are weakly defined. They
 * provide typical implementations that will be overridden by a SoC.
 */
#pragma weak plat_memctrl_tzdram_setup

void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
{
	; /* do nothing */
}
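/*
 * Override sketch (illustration only): a SoC port supplies a strong
 * definition of plat_memctrl_tzdram_setup() in its own platform sources.
 * The body below is modeled on the Tegra186 code removed from this file
 * above (MC_SECURITY_CFG* programming plus the MCE update); it is an
 * assumption about how a port would use the hook, not part of this change.
 */
void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
{
	/* program the TZDRAM carveout base and size (size in MB) */
	tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
	tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
	tegra_mc_write_32(MC_SECURITY_CFG1_0, (uint32_t)(size_in_bytes >> 20));

	/* let the MCE propagate the new carveout settings across the CCPLEX */
	mce_update_gsc_tzdram();
}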
/*
...@@ -352,10 +51,9 @@ void tegra_memctrl_setup(void)
	INFO("Tegra Memory Controller (v2)\n");
#if ENABLE_SMMU_DEVICE
	/* Program the SMMU pagesize */
	tegra_smmu_init();
#endif
	/* Get the settings from the platform */
	assert(plat_mc_settings != NULL);
	mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
...@@ -398,10 +96,14 @@ void tegra_memctrl_setup(void)
	 * boots with MSS having all control, but ROC provides a performance
	 * boost as compared to MSS.
	 */
	tegra_memctrl_reconfig_mss_clients();
	if (plat_mc_settings->reconfig_mss_clients != NULL) {
		plat_mc_settings->reconfig_mss_clients();
	}
	/* Program overrides for MC transactions */
	tegra_memctrl_set_overrides();
	if (plat_mc_settings->set_txn_overrides != NULL) {
		plat_mc_settings->set_txn_overrides();
	}
}
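/*
 * Illustrative sketch (not part of this change): with the hooks above, a
 * SoC port publishes its memory controller configuration through the
 * tegra_mc_settings_t structure returned by tegra_get_mc_settings(). Only
 * fields referenced in this file are shown; the array and function names
 * below (tegra186_*, t186_*) are hypothetical placeholders.
 */
static const tegra_mc_settings_t t186_mc_settings = {
	.streamid_override_cfg = tegra186_streamid_override_regs,
	.txn_override_cfg = tegra186_txn_override_cfgs,
	.num_txn_override_cfgs = ARRAY_SIZE(tegra186_txn_override_cfgs),
	.reconfig_mss_clients = t186_reconfig_mss_clients,
	.set_txn_overrides = t186_set_txn_overrides,
};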
/*
...@@ -409,16 +111,24 @@ void tegra_memctrl_setup(void)
 */
void tegra_memctrl_restore_settings(void)
{
	const tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
	assert(plat_mc_settings != NULL);
	/*
	 * Re-configure MSS to allow ROC to deal with ordering of the
	 * Memory Controller traffic. This is needed as the Memory Controller
	 * resets during System Suspend with MSS having all control, but ROC
	 * provides a performance boost as compared to MSS.
	 */
	tegra_memctrl_reconfig_mss_clients();
	if (plat_mc_settings->reconfig_mss_clients != NULL) {
		plat_mc_settings->reconfig_mss_clients();
	}
	/* Program overrides for MC transactions */
	tegra_memctrl_set_overrides();
	if (plat_mc_settings->set_txn_overrides != NULL) {
		plat_mc_settings->set_txn_overrides();
	}
	/* video memory carveout region */
	if (video_mem_base != 0ULL) {
...@@ -444,42 +154,10 @@ void tegra_memctrl_restore_settings(void)
 */
void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
uint32_t val;
/*
* Setup the Memory controller to allow only secure accesses to
* the TZDRAM carveout
*/
INFO("Configuring TrustZone DRAM Memory Carveout\n");
tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
/*
* When TZ encryption enabled,
* We need setup TZDRAM before CPU to access TZ Carveout,
* otherwise CPU will fetch non-decrypted data.
* So save TZDRAM setting for restore by SC7 resume FW.
* Scratch registers map:
* RSV55_0 = CFG1[12:0] | CFG0[31:20]
* RSV55_1 = CFG3[1:0]
*/
val = tegra_mc_read_32(MC_SECURITY_CFG1_0) & MC_SECURITY_SIZE_MB_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI, val);
val |= tegra_mc_read_32(MC_SECURITY_CFG0_0) & MC_SECURITY_BOM_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO, val);
val = tegra_mc_read_32(MC_SECURITY_CFG3_0) & MC_SECURITY_BOM_HI_MASK;
mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI, val);
	/*
	 * MCE propagates the security configuration values across the
	 * CCPLEX.
	 */
	mce_update_gsc_tzdram();
	/*
	 * Perform platform specific steps.
	 */
	plat_memctrl_tzdram_setup(phys_base, size_in_bytes);
}
/*
...@@ -501,12 +179,21 @@ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
	 * Reset the access configuration registers to restrict access
	 * to the TZRAM aperture
	 */
	for (index = MC_TZRAM_CLIENT_ACCESS_CFG0;
	for (index = MC_TZRAM_CLIENT_ACCESS0_CFG0;
	     index < ((uint32_t)MC_TZRAM_CARVEOUT_CFG + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
	     index += 4U) {
		tegra_mc_write_32(index, 0);
	}
/*
* Enable CPU access configuration registers to access the TZRAM aperture
*/
if (!tegra_chipid_is_t186()) {
val = tegra_mc_read_32(MC_TZRAM_CLIENT_ACCESS1_CFG0);
val |= TZRAM_ALLOW_MPCORER | TZRAM_ALLOW_MPCOREW;
tegra_mc_write_32(MC_TZRAM_CLIENT_ACCESS1_CFG0, val);
}
	/*
	 * Set the TZRAM base. TZRAM base must be 4k aligned, at least.
	 */
...@@ -534,6 +221,9 @@ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
	val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG);
	val &= (uint32_t)~MC_GSC_ENABLE_TZ_LOCK_BIT;
	val |= MC_GSC_LOCK_CFG_SETTINGS_BIT;
if (!tegra_chipid_is_t186()) {
val |= MC_GSC_ENABLE_CPU_SECURE_BIT;
}
	tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val);
	/*
......
...@@ -101,12 +101,13 @@ void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
	 * the last entry. Sanity check the table size before we start with
	 * the context save operation.
	 */
	while (smmu_ctx_regs[num_entries].val != 0xFFFFFFFFU) {
	while ((smmu_ctx_regs[num_entries].reg != 0xFFFFFFFFU)) {
		num_entries++;
	}
	/* panic if the sizes do not match */
	if (num_entries != smmu_ctx_regs[0].val) {
		ERROR("SMMU context size mismatch!");
		panic();
	}
...@@ -123,9 +124,9 @@ void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
		(sizeof(smmu_regs_t) * num_entries));
	/* save the SMMU table address */
	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO,
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SMMU_TABLE_ADDR_LO,
		      (uint32_t)smmu_ctx_addr);
	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI,
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SMMU_TABLE_ADDR_HI,
		      (uint32_t)(smmu_ctx_addr >> 32));
}
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <asm_macros.S>
#define CONSOLE_NUM_BYTES_SHIFT 24
#define CONSOLE_FLUSH_DATA_TO_PORT (1 << 26)
#define CONSOLE_RING_DOORBELL (1 << 31)
#define CONSOLE_IS_BUSY (1 << 31)
#define CONSOLE_WRITE (CONSOLE_RING_DOORBELL | CONSOLE_FLUSH_DATA_TO_PORT)
/*
* This file contains a driver implementation to make use of the
 * real console implementation provided by the SPE firmware running on
 * SoCs after Tegra186.
*
* This console is shared by multiple components and the SPE firmware
* finally displays everything on the UART port.
*/
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(uintptr_t base_addr,
* unsigned int uart_clk, unsigned int baud_rate)
* Function to initialize the console without a
* C Runtime to print debug information. This
* function will be accessed by console_init and
* crash reporting.
* In: x0 - console base address
* w1 - Uart clock in Hz
* w2 - Baud rate
* Out: return 1 on success else 0 on error
* Clobber list : x1, x2
* -----------------------------------------------
*/
func console_core_init
/* Check the input base address */
cbz x0, core_init_fail
mov w0, #1
ret
core_init_fail:
mov w0, wzr
ret
endfunc console_core_init
/* --------------------------------------------------------
* int console_core_putc(int c, uintptr_t base_addr)
* Function to output a character over the console. It
* returns the character printed on success or -1 on error.
* In : w0 - character to be printed
* x1 - console base address
* Out : return -1 on error else return character.
 * Clobber list : x2, x3
* --------------------------------------------------------
*/
func console_core_putc
/* Check the input parameter */
cbz x1, putc_error
/* wait until spe is ready */
1: ldr w2, [x1]
and w2, w2, #CONSOLE_IS_BUSY
cbnz w2, 1b
/* spe is ready */
mov w2, w0
and w2, w2, #0xFF
mov w3, #(CONSOLE_WRITE | (1 << CONSOLE_NUM_BYTES_SHIFT))
orr w2, w2, w3
str w2, [x1]
ret
putc_error:
mov w0, #-1
ret
endfunc console_core_putc
/* ---------------------------------------------
* int console_core_getc(uintptr_t base_addr)
* Function to get a character from the console.
* It returns the character grabbed on success
* or -1 on error.
* In : x0 - console base address
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_getc
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
cbz x0, flush_error
/* flush console */
mov w1, #CONSOLE_WRITE
str w1, [x0]
mov w0, #0
ret
flush_error:
mov w0, #-1
ret
endfunc console_core_flush
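/*
 * Equivalent C sketch (illustration only) of the put-character protocol
 * implemented by console_core_putc above: wait for the SPE to clear the
 * busy bit in the shared mailbox, then post one byte together with the
 * byte count, the flush-to-port flag and the doorbell flag. The local
 * SPE_CONSOLE_* constants simply mirror the assembler defines above.
 */
#define SPE_CONSOLE_NUM_BYTES_SHIFT	24U
#define SPE_CONSOLE_FLUSH_DATA_TO_PORT	(1U << 26)
#define SPE_CONSOLE_RING_DOORBELL	(1U << 31)
#define SPE_CONSOLE_IS_BUSY		(1U << 31)

static int spe_console_putc_sketch(int c, volatile uint32_t *mbox)
{
	/* wait until the SPE firmware has consumed the previous write */
	while ((*mbox & SPE_CONSOLE_IS_BUSY) != 0U) {
	}

	/* post one byte and ask the SPE to flush it and ring its doorbell */
	*mbox = ((uint32_t)c & 0xFFU) |
		(1U << SPE_CONSOLE_NUM_BYTES_SHIFT) |
		SPE_CONSOLE_FLUSH_DATA_TO_PORT |
		SPE_CONSOLE_RING_DOORBELL;

	return c;
}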
...@@ -26,8 +26,6 @@
#include <mmio.h>
#include <profiler.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
......
...@@ -70,6 +70,7 @@ extern uint64_t ns_image_entrypoint;
#pragma weak plat_early_platform_setup
#pragma weak plat_get_bl31_params
#pragma weak plat_get_bl31_plat_params
#pragma weak plat_late_platform_setup
void plat_early_platform_setup(void)
{
...@@ -86,6 +87,11 @@ plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
	return NULL;
}
void plat_late_platform_setup(void)
{
	; /* do nothing */
}
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * security state specified. BL33 corresponds to the non-secure image type
...@@ -227,6 +233,9 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
	 */
	tegra_delay_timer_init();
	/* Early platform setup for Tegra SoCs */
	plat_early_platform_setup();
	/*
	 * Do initial security configuration to allow DRAM/device access.
	 */
...@@ -269,9 +278,6 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
	}
}
	/* Early platform setup for Tegra SoCs */
	plat_early_platform_setup();
	/*
	 * Add timestamp for platform early setup exit.
	 */
...@@ -328,6 +334,13 @@ void bl31_platform_setup(void)
	 */
	tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
	/*
	 * Late setup handler to allow platforms to perform additional
	 * functionality.
	 * This handler gets called with MMU enabled.
	 */
	plat_late_platform_setup();
	/*
	 * Add timestamp for platform setup exit.
	 */
......
...@@ -22,7 +22,6 @@ TEGRA_GICv2_SOURCES := drivers/arm/gic/common/gic_common.c \
BL31_SOURCES += drivers/console/aarch64/console.S \
		drivers/delay_timer/delay_timer.c \
		drivers/ti/uart/aarch64/16550_console.S \
		${TEGRA_GICv2_SOURCES} \
		${COMMON_DIR}/aarch64/tegra_helpers.S \
		${COMMON_DIR}/drivers/pmc/pmc.c \
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __BPMP_IPC_H__
#define __BPMP_IPC_H__
#include <stdbool.h>
#include <stdint.h>
#include <utils_def.h>
/**
* Currently supported reset identifiers
*/
#define TEGRA_RESET_ID_XUSB_PADCTL U(114)
#define TEGRA_RESET_ID_GPCDMA U(70)
/**
 * Function to initialise the IPC with the BPMP
*/
int32_t tegra_bpmp_ipc_init(void);
/**
* Handler to reset a module
*/
int32_t tegra_bpmp_ipc_reset_module(uint32_t rst_id);
#endif /* __BPMP_IPC_H__ */
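A minimal usage sketch for this interface, assuming the caller runs in BL31 with the BPMP already booted (error handling abbreviated):

/* Illustrative only: reset the XUSB PADCTL block over the BPMP IPC. */
static int32_t example_reset_xusb_padctl(void)
{
	int32_t ret = tegra_bpmp_ipc_init();

	if (ret == 0) {
		ret = tegra_bpmp_ipc_reset_module(TEGRA_RESET_ID_XUSB_PADCTL);
	}

	return ret;
}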
...@@ -11,184 +11,9 @@ ...@@ -11,184 +11,9 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <mmio.h>
#include <stdint.h> #include <stdint.h>
/*******************************************************************************
* StreamID to indicate no SMMU translations (requests to be steered on the
* SMMU bypass path)
******************************************************************************/
#define MC_STREAM_ID_MAX 0x7FU
/*******************************************************************************
* Stream ID Override Config registers
******************************************************************************/
#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000U
#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070U
#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8U
#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0U
#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0U
#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8U
#define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138U
#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158U
#define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188U
#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8U
#define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8U
#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8U
#define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220U
#define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230U
#define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR 0x250U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW 0x258U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR 0x260U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW 0x268U
#define MC_STREAMID_OVERRIDE_CFG_TSECSRD 0x2A0U
#define MC_STREAMID_OVERRIDE_CFG_TSECSWR 0x2A8U
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD 0x2C0U
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR 0x2C8U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA 0x300U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA 0x308U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCR 0x310U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB 0x318U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA 0x320U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA 0x328U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCW 0x330U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB 0x338U
#define MC_STREAMID_OVERRIDE_CFG_VICSRD 0x360U
#define MC_STREAMID_OVERRIDE_CFG_VICSWR 0x368U
#define MC_STREAMID_OVERRIDE_CFG_VIW 0x390U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD 0x3C0U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR 0x3C8U
#define MC_STREAMID_OVERRIDE_CFG_APER 0x3D0U
#define MC_STREAMID_OVERRIDE_CFG_APEW 0x3D8U
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD 0x3F0U
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR 0x3F8U
#define MC_STREAMID_OVERRIDE_CFG_SESRD 0x400U
#define MC_STREAMID_OVERRIDE_CFG_SESWR 0x408U
#define MC_STREAMID_OVERRIDE_CFG_ETRR 0x420U
#define MC_STREAMID_OVERRIDE_CFG_ETRW 0x428U
#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB 0x430U
#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB 0x438U
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2 0x440U
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2 0x448U
#define MC_STREAMID_OVERRIDE_CFG_AXISR 0x460U
#define MC_STREAMID_OVERRIDE_CFG_AXISW 0x468U
#define MC_STREAMID_OVERRIDE_CFG_EQOSR 0x470U
#define MC_STREAMID_OVERRIDE_CFG_EQOSW 0x478U
#define MC_STREAMID_OVERRIDE_CFG_UFSHCR 0x480U
#define MC_STREAMID_OVERRIDE_CFG_UFSHCW 0x488U
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR 0x490U
#define MC_STREAMID_OVERRIDE_CFG_BPMPR 0x498U
#define MC_STREAMID_OVERRIDE_CFG_BPMPW 0x4A0U
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR 0x4A8U
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW 0x4B0U
#define MC_STREAMID_OVERRIDE_CFG_AONR 0x4B8U
#define MC_STREAMID_OVERRIDE_CFG_AONW 0x4C0U
#define MC_STREAMID_OVERRIDE_CFG_AONDMAR 0x4C8U
#define MC_STREAMID_OVERRIDE_CFG_AONDMAW 0x4D0U
#define MC_STREAMID_OVERRIDE_CFG_SCER 0x4D8U
#define MC_STREAMID_OVERRIDE_CFG_SCEW 0x4E0U
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR 0x4E8U
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW 0x4F0U
#define MC_STREAMID_OVERRIDE_CFG_APEDMAR 0x4F8U
#define MC_STREAMID_OVERRIDE_CFG_APEDMAW 0x500U
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1 0x508U
#define MC_STREAMID_OVERRIDE_CFG_VICSRD1 0x510U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518U
/*******************************************************************************
* Macro to calculate Security cfg register addr from StreamID Override register
******************************************************************************/
#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) ((addr) + (uint32_t)sizeof(uint32_t))
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV (0U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV (1U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV (2U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV (3U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL (0U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL (1U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL (2U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL (3U << 8)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO (0U << 12)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID (1U << 12)
/*******************************************************************************
* Memory Controller transaction override config registers
******************************************************************************/
#define MC_TXN_OVERRIDE_CONFIG_HDAR 0x10a8U
#define MC_TXN_OVERRIDE_CONFIG_BPMPW 0x14a0U
#define MC_TXN_OVERRIDE_CONFIG_PTCR 0x1000U
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR 0x1490U
#define MC_TXN_OVERRIDE_CONFIG_EQOSW 0x1478U
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR 0x13f8U
#define MC_TXN_OVERRIDE_CONFIG_ISPRA 0x1220U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA 0x1328U
#define MC_TXN_OVERRIDE_CONFIG_VICSRD 0x1360U
#define MC_TXN_OVERRIDE_CONFIG_MPCOREW 0x11c8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD 0x12c0U
#define MC_TXN_OVERRIDE_CONFIG_AXISR 0x1460U
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW 0x14f0U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCW 0x1330U
#define MC_TXN_OVERRIDE_CONFIG_EQOSR 0x1470U
#define MC_TXN_OVERRIDE_CONFIG_APEDMAR 0x14f8U
#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD 0x10e0U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB 0x1318U
#define MC_TXN_OVERRIDE_CONFIG_VICSRD1 0x1510U
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR 0x14a8U
#define MC_TXN_OVERRIDE_CONFIG_VIW 0x1390U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA 0x1308U
#define MC_TXN_OVERRIDE_CONFIG_AXISW 0x1468U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR 0x1260U
#define MC_TXN_OVERRIDE_CONFIG_UFSHCR 0x1480U
#define MC_TXN_OVERRIDE_CONFIG_TSECSWR 0x12a8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR 0x12c8U
#define MC_TXN_OVERRIDE_CONFIG_SATAR 0x10f8U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW 0x1258U
#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB 0x1438U
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2 0x1440U
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR 0x14e8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2 0x1448U
#define MC_TXN_OVERRIDE_CONFIG_AONDMAW 0x14d0U
#define MC_TXN_OVERRIDE_CONFIG_APEDMAW 0x1500U
#define MC_TXN_OVERRIDE_CONFIG_AONW 0x14c0U
#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR 0x10b0U
#define MC_TXN_OVERRIDE_CONFIG_ETRR 0x1420U
#define MC_TXN_OVERRIDE_CONFIG_SESWR 0x1408U
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD 0x13f0U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD 0x13c0U
#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB 0x1430U
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW 0x14b0U
#define MC_TXN_OVERRIDE_CONFIG_APER 0x13d0U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1 0x1518U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR 0x1250U
#define MC_TXN_OVERRIDE_CONFIG_ISPWA 0x1230U
#define MC_TXN_OVERRIDE_CONFIG_SESRD 0x1400U
#define MC_TXN_OVERRIDE_CONFIG_SCER 0x14d8U
#define MC_TXN_OVERRIDE_CONFIG_AONR 0x14b8U
#define MC_TXN_OVERRIDE_CONFIG_MPCORER 0x1138U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA 0x1320U
#define MC_TXN_OVERRIDE_CONFIG_HDAW 0x11a8U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR 0x13c8U
#define MC_TXN_OVERRIDE_CONFIG_UFSHCW 0x1488U
#define MC_TXN_OVERRIDE_CONFIG_AONDMAR 0x14c8U
#define MC_TXN_OVERRIDE_CONFIG_SATAW 0x11e8U
#define MC_TXN_OVERRIDE_CONFIG_ETRW 0x1428U
#define MC_TXN_OVERRIDE_CONFIG_VICSWR 0x1368U
#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR 0x1158U
#define MC_TXN_OVERRIDE_CONFIG_AFIR 0x1070U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB 0x1338U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA 0x1300U
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1 0x1508U
#define MC_TXN_OVERRIDE_CONFIG_ISPWB 0x1238U
#define MC_TXN_OVERRIDE_CONFIG_BPMPR 0x1498U
#define MC_TXN_OVERRIDE_CONFIG_APEW 0x13d8U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCR 0x1310U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW 0x1268U
#define MC_TXN_OVERRIDE_CONFIG_TSECSRD 0x12a0U
#define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188U
#define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0U
/******************************************************************************* /*******************************************************************************
* Structure to hold the transaction override settings to use to override * Structure to hold the transaction override settings to use to override
* client inputs * client inputs
...@@ -229,6 +54,25 @@ typedef struct mc_streamid_security_cfg { ...@@ -229,6 +54,25 @@ typedef struct mc_streamid_security_cfg {
#define CLIENT_FLAG_NON_SECURE 1U #define CLIENT_FLAG_NON_SECURE 1U
#define CLIENT_INPUTS_OVERRIDE 1U #define CLIENT_INPUTS_OVERRIDE 1U
#define CLIENT_INPUTS_NO_OVERRIDE 0U #define CLIENT_INPUTS_NO_OVERRIDE 0U
/*******************************************************************************
* StreamID to indicate no SMMU translations (requests to be steered on the
* SMMU bypass path)
******************************************************************************/
#define MC_STREAM_ID_MAX 0x7FU
/*******************************************************************************
* Memory Controller SMMU Bypass config register
******************************************************************************/
#define MC_SMMU_BYPASS_CONFIG 0x1820U
#define MC_SMMU_BYPASS_CTRL_MASK 0x3U
#define MC_SMMU_BYPASS_CTRL_SHIFT 0U
#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_RSVD (1U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1U << 31)
#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
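A hedged sketch of applying these settings, assuming a tegra_mc_write_32() helper analogous to the tegra_mc_read_32() accessor shown further down in this header:

/* Illustrative only: steer all clients except the special StreamID through the SMMU. */
tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG, MC_SMMU_BYPASS_CONFIG_SETTINGS);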
#define mc_make_sec_cfg(off, ns, ovrrd, access) \ #define mc_make_sec_cfg(off, ns, ovrrd, access) \
{ \ { \
...@@ -250,131 +94,10 @@ typedef struct tegra_mc_settings { ...@@ -250,131 +94,10 @@ typedef struct tegra_mc_settings {
uint32_t num_streamid_security_cfgs; uint32_t num_streamid_security_cfgs;
const mc_txn_override_cfg_t *txn_override_cfg; const mc_txn_override_cfg_t *txn_override_cfg;
uint32_t num_txn_override_cfgs; uint32_t num_txn_override_cfgs;
void (*reconfig_mss_clients)(void);
void (*set_txn_overrides)(void);
} tegra_mc_settings_t; } tegra_mc_settings_t;
#endif /* __ASSEMBLY__ */
/*******************************************************************************
* Memory Controller SMMU Bypass config register
******************************************************************************/
#define MC_SMMU_BYPASS_CONFIG 0x1820U
#define MC_SMMU_BYPASS_CTRL_MASK 0x3U
#define MC_SMMU_BYPASS_CTRL_SHIFT 0U
#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_RSVD (1U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3U << MC_SMMU_BYPASS_CTRL_SHIFT)
#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1U << 31)
#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1U << 0)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2U << 4)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1U << 12)
/*******************************************************************************
* Non-SO_DEV transactions override values for CGID_TAG bitfield for the
* MC_TXN_OVERRIDE_CONFIG_{module} registers
******************************************************************************/
#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0U
#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1U
#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2U
#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3U
#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3U
/*******************************************************************************
* Memory Controller Reset Control registers
******************************************************************************/
#define MC_CLIENT_HOTRESET_CTRL0 0x200U
#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0U
#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1U << 0)
#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1U << 6)
#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1U << 7)
#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1U << 8)
#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1U << 9)
#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1U << 11)
#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1U << 15)
#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1U << 17)
#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1U << 18)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1U << 19)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1U << 20)
#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1U << 22)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1U << 29)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1U << 30)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1U << 31)
#define MC_CLIENT_HOTRESET_STATUS0 0x204U
#define MC_CLIENT_HOTRESET_CTRL1 0x970U
#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0U
#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1U << 0)
#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1U << 2)
#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1U << 5)
#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1U << 6)
#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1U << 7)
#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1U << 8)
#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1U << 12)
#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1U << 13)
#define MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1U << 18)
#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1U << 19)
#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1U << 20)
#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1U << 21)
#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1U << 22)
#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1U << 23)
#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1U << 24)
#define MC_CLIENT_HOTRESET_STATUS1 0x974U
/*******************************************************************************
* Memory Controller's PCFIFO client configuration registers
******************************************************************************/
#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4UL
#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000UL
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0UL << 17)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1UL << 17)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0UL << 21)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1UL << 21)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0UL << 29)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1UL << 29)
#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8UL
#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000UL
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0UL << 11)
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1UL << 11)
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0UL << 13)
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1UL << 13)
#define MC_PCFIFO_CLIENT_CONFIG3 0xddcUL
#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0UL
#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0UL << 7)
#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1UL << 7)
#define MC_PCFIFO_CLIENT_CONFIG4 0xde0UL
#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0UL
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0UL << 1)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1UL << 1)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0UL << 5)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1UL << 5)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0UL << 13)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1UL << 13)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0UL << 15)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_ORDERED (1UL << 15)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1UL << 15)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0UL << 17)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1UL << 17)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0UL << 22)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1UL << 22)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0UL << 26)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1UL << 26)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0UL << 30)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1UL << 30)
#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4UL
#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0UL
#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0UL << 0)
#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1UL << 0)
#ifndef __ASSEMBLY__
#include <lib/mmio.h>
static inline uint32_t tegra_mc_read_32(uint32_t off) static inline uint32_t tegra_mc_read_32(uint32_t off)
{ {
return mmio_read_32(TEGRA_MC_BASE + off); return mmio_read_32(TEGRA_MC_BASE + off);
...@@ -410,6 +133,22 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val) ...@@ -410,6 +133,22 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
(uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \ (uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
} }
#define mc_set_tsa_w_passthrough(client) \
{ \
mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
(TSA_CONFIG_STATIC0_CSW_RESET_W & \
(uint32_t)~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
(uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
}
#define mc_set_tsa_r_passthrough(client) \
{ \
mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSR_##client, \
(TSA_CONFIG_STATIC0_CSR_RESET_R & \
(uint32_t)~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
(uint32_t)TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
}
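For illustration only, a platform memory-controller file could expand these helpers once per TSA client; the client tokens below are placeholders and would have to match real TSA_CONFIG_STATIC0_CSR_/CSW_ register names:

/* Illustrative only: force memtype pass-through for one read and one write client. */
mc_set_tsa_r_passthrough(EXAMPLE_CLIENT_R);
mc_set_tsa_w_passthrough(EXAMPLE_CLIENT_W);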
#define mc_set_txn_override(client, normal_axi_id, so_dev_axi_id, normal_override, so_dev_override) \ #define mc_set_txn_override(client, normal_axi_id, so_dev_axi_id, normal_override, so_dev_override) \
{ \ { \
tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
...@@ -426,6 +165,14 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val) ...@@ -426,6 +165,14 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
******************************************************************************/ ******************************************************************************/
tegra_mc_settings_t *tegra_get_mc_settings(void); tegra_mc_settings_t *tegra_get_mc_settings(void);
#endif /* __ASSMEBLY__ */ /*******************************************************************************
* Handler to program the scratch registers with TZDRAM settings for the
* resume firmware.
*
* Implemented by SoCs under tegra/soc/txxx
******************************************************************************/
void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes);
#endif /* __ASSEMBLY__ */
#endif /* MEMCTRL_V2_H */ #endif /* MEMCTRL_V2_H */
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TEGRA186_PRIVATE_H
#define TEGRA186_PRIVATE_H
void tegra186_cpu_reset_handler(void);
uint64_t tegra186_get_cpu_reset_handler_base(void);
uint64_t tegra186_get_cpu_reset_handler_size(void);
uint64_t tegra186_get_smmu_ctx_offset(void);
void tegra186_set_system_suspend_entry(void);
#endif /* TEGRA186_PRIVATE_H */
...@@ -135,6 +135,7 @@ ...@@ -135,6 +135,7 @@
#define MC_GSC_BASE_LO_MASK U(0xFFFFF) #define MC_GSC_BASE_LO_MASK U(0xFFFFF)
#define MC_GSC_BASE_HI_SHIFT U(0) #define MC_GSC_BASE_HI_SHIFT U(0)
#define MC_GSC_BASE_HI_MASK U(3) #define MC_GSC_BASE_HI_MASK U(3)
#define MC_GSC_ENABLE_CPU_SECURE_BIT (U(1) << 31)
/* TZDRAM carveout configuration registers */ /* TZDRAM carveout configuration registers */
#define MC_SECURITY_CFG0_0 U(0x70) #define MC_SECURITY_CFG0_0 U(0x70)
...@@ -165,7 +166,10 @@ ...@@ -165,7 +166,10 @@
#define MC_TZRAM_BASE_LO U(0x2194) #define MC_TZRAM_BASE_LO U(0x2194)
#define MC_TZRAM_BASE_HI U(0x2198) #define MC_TZRAM_BASE_HI U(0x2198)
#define MC_TZRAM_SIZE U(0x219C) #define MC_TZRAM_SIZE U(0x219C)
#define MC_TZRAM_CLIENT_ACCESS_CFG0 U(0x21A0) #define MC_TZRAM_CLIENT_ACCESS0_CFG0 U(0x21A0)
#define MC_TZRAM_CLIENT_ACCESS1_CFG0 U(0x21A4)
#define TZRAM_ALLOW_MPCORER (U(1) << 7)
#define TZRAM_ALLOW_MPCOREW (U(1) << 25)
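As a hedged sketch (assuming these offsets are relative to TEGRA_MC_BASE, like the other MC registers), the new bits could be combined when granting the CPU complex access to the TZRAM carveout:

/* Illustrative only: allow MPCore read/write access to the TZRAM carveout. */
mmio_write_32(TEGRA_MC_BASE + MC_TZRAM_CLIENT_ACCESS0_CFG0,
	      TZRAM_ALLOW_MPCORER | TZRAM_ALLOW_MPCOREW);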
/******************************************************************************* /*******************************************************************************
* Tegra UART Controller constants * Tegra UART Controller constants
...@@ -232,10 +236,19 @@ ...@@ -232,10 +236,19 @@
#define SECURE_SCRATCH_RSV11_HI U(0x6AC) #define SECURE_SCRATCH_RSV11_HI U(0x6AC)
#define SECURE_SCRATCH_RSV53_LO U(0x7F8) #define SECURE_SCRATCH_RSV53_LO U(0x7F8)
#define SECURE_SCRATCH_RSV53_HI U(0x7FC) #define SECURE_SCRATCH_RSV53_HI U(0x7FC)
#define SECURE_SCRATCH_RSV54_HI U(0x804)
#define SECURE_SCRATCH_RSV55_LO U(0x808) #define SECURE_SCRATCH_RSV55_LO U(0x808)
#define SECURE_SCRATCH_RSV55_HI U(0x80C) #define SECURE_SCRATCH_RSV55_HI U(0x80C)
#define SCRATCH_RESET_VECTOR_LO SECURE_SCRATCH_RSV1_LO
#define SCRATCH_RESET_VECTOR_HI SECURE_SCRATCH_RSV1_HI
#define SCRATCH_SECURE_BOOTP_FCFG SECURE_SCRATCH_RSV6
#define SCRATCH_SMMU_TABLE_ADDR_LO SECURE_SCRATCH_RSV11_LO
#define SCRATCH_SMMU_TABLE_ADDR_HI SECURE_SCRATCH_RSV11_HI
#define SCRATCH_BL31_PARAMS_ADDR SECURE_SCRATCH_RSV53_LO
#define SCRATCH_BL31_PLAT_PARAMS_ADDR SECURE_SCRATCH_RSV53_HI
#define SCRATCH_TZDRAM_ADDR_LO SECURE_SCRATCH_RSV55_LO
#define SCRATCH_TZDRAM_ADDR_HI SECURE_SCRATCH_RSV55_HI
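A hedged sketch of how an SoC's plat_memctrl_tzdram_setup() (declared in memctrl_v2.h above) might use these aliases to hand the TZDRAM base to the resume firmware; TEGRA_SCRATCH_BASE is assumed here to be the scratch register aperture base:

/* Illustrative only: record the TZDRAM base for the warmboot/resume path. */
void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
{
	(void)size_in_bytes;

	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_LO,
		      (uint32_t)phys_base);
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_HI,
		      (uint32_t)(phys_base >> 32));
}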
/******************************************************************************* /*******************************************************************************
* Tegra Memory Mapped Control Register Access constants * Tegra Memory Mapped Control Register Access constants
******************************************************************************/ ******************************************************************************/
......
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TEGRA_MC_DEF_H
#define TEGRA_MC_DEF_H
/*******************************************************************************
* Memory Controller's PCFIFO client configuration registers
******************************************************************************/
#define MC_PCFIFO_CLIENT_CONFIG0 0xdd0U
#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4U
#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000U
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0U << 17)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1U << 17)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0U << 21)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1U << 21)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0U << 29)
#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1U << 29)
#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8U
#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000U
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0U << 11)
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1U << 11)
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0U << 13)
#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1U << 13)
#define MC_PCFIFO_CLIENT_CONFIG3 0xddcU
#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0U
#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0U << 7)
#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1U << 7)
#define MC_PCFIFO_CLIENT_CONFIG4 0xde0U
#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0U
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0U << 1)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1U << 1)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0U << 5)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1U << 5)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0U << 13)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1U << 13)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0U << 15)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_ORDERED (1U << 15)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1U << 15)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0U << 17)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1U << 17)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0U << 22)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1U << 22)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0U << 26)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1U << 26)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0U << 30)
#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1U << 30)
#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4U
#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0U
#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0U << 0)
#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1U << 0)
/*******************************************************************************
* Stream ID Override Config registers
******************************************************************************/
#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000U
#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070U
#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8U
#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0U
#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0U
#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8U
#define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138U
#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158U
#define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188U
#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8U
#define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8U
#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8U
#define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220U
#define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230U
#define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR 0x250U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW 0x258U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR 0x260U
#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW 0x268U
#define MC_STREAMID_OVERRIDE_CFG_TSECSRD 0x2A0U
#define MC_STREAMID_OVERRIDE_CFG_TSECSWR 0x2A8U
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD 0x2C0U
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR 0x2C8U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA 0x300U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA 0x308U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCR 0x310U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB 0x318U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA 0x320U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA 0x328U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCW 0x330U
#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB 0x338U
#define MC_STREAMID_OVERRIDE_CFG_VICSRD 0x360U
#define MC_STREAMID_OVERRIDE_CFG_VICSWR 0x368U
#define MC_STREAMID_OVERRIDE_CFG_VIW 0x390U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD 0x3C0U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR 0x3C8U
#define MC_STREAMID_OVERRIDE_CFG_APER 0x3D0U
#define MC_STREAMID_OVERRIDE_CFG_APEW 0x3D8U
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD 0x3F0U
#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR 0x3F8U
#define MC_STREAMID_OVERRIDE_CFG_SESRD 0x400U
#define MC_STREAMID_OVERRIDE_CFG_SESWR 0x408U
#define MC_STREAMID_OVERRIDE_CFG_ETRR 0x420U
#define MC_STREAMID_OVERRIDE_CFG_ETRW 0x428U
#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB 0x430U
#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB 0x438U
#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2 0x440U
#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2 0x448U
#define MC_STREAMID_OVERRIDE_CFG_AXISR 0x460U
#define MC_STREAMID_OVERRIDE_CFG_AXISW 0x468U
#define MC_STREAMID_OVERRIDE_CFG_EQOSR 0x470U
#define MC_STREAMID_OVERRIDE_CFG_EQOSW 0x478U
#define MC_STREAMID_OVERRIDE_CFG_UFSHCR 0x480U
#define MC_STREAMID_OVERRIDE_CFG_UFSHCW 0x488U
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR 0x490U
#define MC_STREAMID_OVERRIDE_CFG_BPMPR 0x498U
#define MC_STREAMID_OVERRIDE_CFG_BPMPW 0x4A0U
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR 0x4A8U
#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW 0x4B0U
#define MC_STREAMID_OVERRIDE_CFG_AONR 0x4B8U
#define MC_STREAMID_OVERRIDE_CFG_AONW 0x4C0U
#define MC_STREAMID_OVERRIDE_CFG_AONDMAR 0x4C8U
#define MC_STREAMID_OVERRIDE_CFG_AONDMAW 0x4D0U
#define MC_STREAMID_OVERRIDE_CFG_SCER 0x4D8U
#define MC_STREAMID_OVERRIDE_CFG_SCEW 0x4E0U
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR 0x4E8U
#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW 0x4F0U
#define MC_STREAMID_OVERRIDE_CFG_APEDMAR 0x4F8U
#define MC_STREAMID_OVERRIDE_CFG_APEDMAW 0x500U
#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1 0x508U
#define MC_STREAMID_OVERRIDE_CFG_VICSRD1 0x510U
#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518U
/*******************************************************************************
* Macro to calculate Security cfg register addr from StreamID Override register
******************************************************************************/
#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) ((addr) + (uint32_t)sizeof(uint32_t))
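Worked example (simple offset arithmetic, not taken from the patch): the security configuration register sits one 32-bit word above its override register, so

/* MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(MC_STREAMID_OVERRIDE_CFG_AFIR) == 0x074U */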
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV (0U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV (1U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV (2U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV (3U << 4)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL (0U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL (1U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL (2U << 8)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL (3U << 8)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO (0U << 12)
#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID (1U << 12)
/*******************************************************************************
* Memory Controller transaction override config registers
******************************************************************************/
#define MC_TXN_OVERRIDE_CONFIG_HDAR 0x10a8U
#define MC_TXN_OVERRIDE_CONFIG_BPMPW 0x14a0U
#define MC_TXN_OVERRIDE_CONFIG_PTCR 0x1000U
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR 0x1490U
#define MC_TXN_OVERRIDE_CONFIG_EQOSW 0x1478U
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR 0x13f8U
#define MC_TXN_OVERRIDE_CONFIG_ISPRA 0x1220U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA 0x1328U
#define MC_TXN_OVERRIDE_CONFIG_VICSRD 0x1360U
#define MC_TXN_OVERRIDE_CONFIG_MPCOREW 0x11c8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD 0x12c0U
#define MC_TXN_OVERRIDE_CONFIG_AXISR 0x1460U
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW 0x14f0U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCW 0x1330U
#define MC_TXN_OVERRIDE_CONFIG_EQOSR 0x1470U
#define MC_TXN_OVERRIDE_CONFIG_APEDMAR 0x14f8U
#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD 0x10e0U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB 0x1318U
#define MC_TXN_OVERRIDE_CONFIG_VICSRD1 0x1510U
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR 0x14a8U
#define MC_TXN_OVERRIDE_CONFIG_VIW 0x1390U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA 0x1308U
#define MC_TXN_OVERRIDE_CONFIG_AXISW 0x1468U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR 0x1260U
#define MC_TXN_OVERRIDE_CONFIG_UFSHCR 0x1480U
#define MC_TXN_OVERRIDE_CONFIG_TSECSWR 0x12a8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR 0x12c8U
#define MC_TXN_OVERRIDE_CONFIG_SATAR 0x10f8U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW 0x1258U
#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB 0x1438U
#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2 0x1440U
#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR 0x14e8U
#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2 0x1448U
#define MC_TXN_OVERRIDE_CONFIG_AONDMAW 0x14d0U
#define MC_TXN_OVERRIDE_CONFIG_APEDMAW 0x1500U
#define MC_TXN_OVERRIDE_CONFIG_AONW 0x14c0U
#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR 0x10b0U
#define MC_TXN_OVERRIDE_CONFIG_ETRR 0x1420U
#define MC_TXN_OVERRIDE_CONFIG_SESWR 0x1408U
#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD 0x13f0U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD 0x13c0U
#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB 0x1430U
#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW 0x14b0U
#define MC_TXN_OVERRIDE_CONFIG_APER 0x13d0U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1 0x1518U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR 0x1250U
#define MC_TXN_OVERRIDE_CONFIG_ISPWA 0x1230U
#define MC_TXN_OVERRIDE_CONFIG_SESRD 0x1400U
#define MC_TXN_OVERRIDE_CONFIG_SCER 0x14d8U
#define MC_TXN_OVERRIDE_CONFIG_AONR 0x14b8U
#define MC_TXN_OVERRIDE_CONFIG_MPCORER 0x1138U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA 0x1320U
#define MC_TXN_OVERRIDE_CONFIG_HDAW 0x11a8U
#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR 0x13c8U
#define MC_TXN_OVERRIDE_CONFIG_UFSHCW 0x1488U
#define MC_TXN_OVERRIDE_CONFIG_AONDMAR 0x14c8U
#define MC_TXN_OVERRIDE_CONFIG_SATAW 0x11e8U
#define MC_TXN_OVERRIDE_CONFIG_ETRW 0x1428U
#define MC_TXN_OVERRIDE_CONFIG_VICSWR 0x1368U
#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR 0x1158U
#define MC_TXN_OVERRIDE_CONFIG_AFIR 0x1070U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB 0x1338U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA 0x1300U
#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1 0x1508U
#define MC_TXN_OVERRIDE_CONFIG_ISPWB 0x1238U
#define MC_TXN_OVERRIDE_CONFIG_BPMPR 0x1498U
#define MC_TXN_OVERRIDE_CONFIG_APEW 0x13d8U
#define MC_TXN_OVERRIDE_CONFIG_SDMMCR 0x1310U
#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW 0x1268U
#define MC_TXN_OVERRIDE_CONFIG_TSECSRD 0x12a0U
#define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188U
#define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0U
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1U << 0)
#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2U << 4)
#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1U << 12)
/*******************************************************************************
* Non-SO_DEV transactions override values for CGID_TAG bitfield for the
* MC_TXN_OVERRIDE_CONFIG_{module} registers
******************************************************************************/
#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0U
#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1U
#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2U
#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3U
#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3ULL
/*******************************************************************************
* Memory Controller Reset Control registers
******************************************************************************/
#define MC_CLIENT_HOTRESET_CTRL0 0x200U
#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0U
#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1U << 0)
#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1U << 6)
#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1U << 7)
#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1U << 8)
#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1U << 9)
#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1U << 11)
#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1U << 15)
#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1U << 17)
#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1U << 18)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1U << 19)
#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1U << 20)
#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1U << 22)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1U << 29)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1U << 30)
#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1U << 31)
#define MC_CLIENT_HOTRESET_STATUS0 0x204U
#define MC_CLIENT_HOTRESET_CTRL1 0x970U
#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0U
#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1U << 0)
#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1U << 2)
#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1U << 5)
#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1U << 6)
#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1U << 7)
#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1U << 8)
#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1U << 12)
#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1U << 13)
#define MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1U << 18)
#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1U << 19)
#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1U << 20)
#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1U << 21)
#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1U << 22)
#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1U << 23)
#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1U << 24)
#define MC_CLIENT_HOTRESET_STATUS1 0x974U
#endif /* TEGRA_MC_DEF_H */
...@@ -75,6 +75,8 @@ uint32_t plat_get_console_from_id(int32_t id); ...@@ -75,6 +75,8 @@ uint32_t plat_get_console_from_id(int32_t id);
void plat_gic_setup(void); void plat_gic_setup(void);
struct tegra_bl31_params *plat_get_bl31_params(void); struct tegra_bl31_params *plat_get_bl31_params(void);
plat_params_from_bl2_t *plat_get_bl31_plat_params(void); plat_params_from_bl2_t *plat_get_bl31_plat_params(void);
void plat_early_platform_setup(void);
void plat_late_platform_setup(void);
/* Declarations for plat_secondary.c */ /* Declarations for plat_secondary.c */
void plat_secondary_setup(void); void plat_secondary_setup(void);
...@@ -126,7 +128,6 @@ int tegra_prepare_cpu_on_finish(unsigned long mpidr); ...@@ -126,7 +128,6 @@ int tegra_prepare_cpu_on_finish(unsigned long mpidr);
/* Declarations for tegra_bl31_setup.c */ /* Declarations for tegra_bl31_setup.c */
plat_params_from_bl2_t *bl31_get_plat_params(void); plat_params_from_bl2_t *bl31_get_plat_params(void);
int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes); int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
void plat_early_platform_setup(void);
/* Declarations for tegra_delay_timer.c */ /* Declarations for tegra_delay_timer.c */
void tegra_delay_timer_init(void); void tegra_delay_timer_init(void);
......
...@@ -40,5 +40,5 @@ include ${SOC_DIR}/platform_${TARGET_SOC}.mk ...@@ -40,5 +40,5 @@ include ${SOC_DIR}/platform_${TARGET_SOC}.mk
# modify BUILD_PLAT to point to SoC specific build directory # modify BUILD_PLAT to point to SoC specific build directory
BUILD_PLAT := ${BUILD_BASE}/${PLAT}/${TARGET_SOC}/${BUILD_TYPE} BUILD_PLAT := ${BUILD_BASE}/${PLAT}/${TARGET_SOC}/${BUILD_TYPE}
# enable signed comparison checks # platform cflags (enable signed comparisons, disable stdlib)
TF_CFLAGS += -Wsign-compare TF_CFLAGS += -Wsign-compare -nostdlib
...@@ -19,7 +19,8 @@ $(eval $(call add_define,MAX_XLAT_TABLES)) ...@@ -19,7 +19,8 @@ $(eval $(call add_define,MAX_XLAT_TABLES))
MAX_MMAP_REGIONS := 8 MAX_MMAP_REGIONS := 8
$(eval $(call add_define,MAX_MMAP_REGIONS)) $(eval $(call add_define,MAX_MMAP_REGIONS))
BL31_SOURCES += lib/cpus/aarch64/denver.S \ BL31_SOURCES += drivers/ti/uart/aarch64/16550_console.S \
lib/cpus/aarch64/denver.S \
${COMMON_DIR}/drivers/flowctrl/flowctrl.c \ ${COMMON_DIR}/drivers/flowctrl/flowctrl.c \
${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \ ${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \
${SOC_DIR}/plat_psci_handlers.c \ ${SOC_DIR}/plat_psci_handlers.c \
......