Unverified commit 30490b15, authored by Antonio Niño Díaz and committed by GitHub

Merge pull request #1785 from vwadekar/tf2.0-tegra-downstream-rebase-1.25.19

Tf2.0 tegra downstream rebase 1.25.19
parents d636f67e a474d3d7
......@@ -248,7 +248,6 @@ DTC_CPPFLAGS += -nostdinc -Iinclude -undef -x assembler-with-cpp
# Common sources and include directories
################################################################################
include lib/compiler-rt/compiler-rt.mk
include lib/libc/libc.mk
BL_COMMON_SOURCES += common/bl_common.c \
common/tf_log.c \
......@@ -392,6 +391,13 @@ endif
endif
################################################################################
# Include libc if not overridden
################################################################################
ifeq (${OVERRIDE_LIBC},0)
include lib/libc/libc.mk
endif
################################################################################
# Check incompatible options
################################################################################
......@@ -595,6 +601,7 @@ $(eval $(call assert_boolean,HANDLE_EA_EL3_FIRST))
$(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
$(eval $(call assert_boolean,MULTI_CONSOLE_API))
$(eval $(call assert_boolean,NS_TIMER_SWITCH))
$(eval $(call assert_boolean,OVERRIDE_LIBC))
$(eval $(call assert_boolean,PL011_GENERIC_UART))
$(eval $(call assert_boolean,PROGRAMMABLE_RESET_ADDRESS))
$(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
......
Tegra SoCs - Overview
=====================
- .. rubric:: T186
:name: t186
The NVIDIA® Parker (T186) series system-on-chip (SoC) delivers a heterogeneous
multi-processing (HMP) solution designed to optimize performance and
efficiency.
T186 has dual NVIDIA Denver 2 ARM® CPU cores plus quad ARM Cortex®-A57 cores,
in a coherent multiprocessor configuration. The Denver 2 and Cortex-A57 cores
support ARMv8, executing both 64-bit AArch64 code and 32-bit AArch32 code,
including legacy ARMv7 applications. The Denver 2 processors each have 128 KB
instruction and 64 KB data Level 1 caches, and share a 2 MB Level 2
unified cache. The Cortex-A57 processors each have 48 KB instruction and 32 KB
data Level 1 caches, and also share a 2 MB Level 2 unified cache. A
high-speed coherency fabric connects these two processor complexes and allows
heterogeneous multi-processing with all six cores if required.
- .. rubric:: T210
:name: t210
......@@ -49,11 +66,21 @@ Directory structure
Trusted OS dispatcher
=====================
Tegra supports multiple Trusted OS', Trusted Little Kernel (TLK) being one of
them. In order to include the 'tlkd' dispatcher in the image, pass 'SPD=tlkd'
on the command line while preparing a bl31 image. This allows other Trusted OS
vendors to use the upstream code and include their dispatchers in the image
without changing any makefiles.
Tegra supports multiple Trusted OSes.
- Trusted Little Kernel (TLK): In order to include the 'tlkd' dispatcher in
the image, pass 'SPD=tlkd' on the command line while preparing a bl31 image.
- Trusty: In order to include the 'trusty' dispatcher in the image, pass
'SPD=trusty' on the command line while preparing a bl31 image.
This allows other Trusted OS vendors to use the upstream code and include
their dispatchers in the image without changing any makefiles.
The Trusted OSes supported by the Tegra platforms are:

- Tegra132: TLK
- Tegra210: TLK and Trusty
- Tegra186: Trusty
Preparing the BL31 image to run on Tegra SoCs
=============================================
......@@ -61,7 +88,8 @@ Preparing the BL31 image to run on Tegra SoCs
.. code:: shell
CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf- make PLAT=tegra \
TARGET_SOC=<target-soc e.g. t210|t132> SPD=<dispatcher e.g. tlkd> bl31
TARGET_SOC=<target-soc e.g. t186|t210|t132> SPD=<dispatcher e.g. trusty|tlkd>
bl31
Platforms wanting to use different TZDRAM\_BASE, can add ``TZDRAM_BASE=<value>``
to the build command line.
......
......@@ -575,6 +575,10 @@ Common build options
1 (do save and restore). 0 is the default. An SPD may set this to 1 if it
wants the timer registers to be saved and restored.
- ``OVERRIDE_LIBC``: This option allows platforms to override the default libc
  for the BL image. It can be either 0 (include the default libc) or 1 (do not
  include it, so that the platform can provide its own implementation). The
  default value is 0.
- ``PL011_GENERIC_UART``: Boolean option to indicate the PL011 driver that
the underlying hardware is not a full PL011 UART but a minimally compliant
generic UART, which is a subset of the PL011. The driver will not access
......
......@@ -20,7 +20,9 @@
*/
#define TLK_REGISTER_LOGBUF TLK_TOS_YIELD_FID(0x1)
#define TLK_REGISTER_REQBUF TLK_TOS_YIELD_FID(0x2)
#define TLK_REGISTER_NS_DRAM TLK_TOS_YIELD_FID(0x4)
#define TLK_SS_REGISTER_HANDLER TLK_TOS_YIELD_FID(0x3)
#define TLK_REGISTER_NS_DRAM_RANGES TLK_TOS_YIELD_FID(0x4)
#define TLK_SET_ROOT_OF_TRUST TLK_TOS_YIELD_FID(0x5)
#define TLK_RESUME_FID TLK_TOS_YIELD_FID(0x100)
#define TLK_SYSTEM_SUSPEND TLK_TOS_YIELD_FID(0xE001)
#define TLK_SYSTEM_RESUME TLK_TOS_YIELD_FID(0xE002)
......
......@@ -122,6 +122,9 @@ MULTI_CONSOLE_API := 0
# NS timer register save and restore
NS_TIMER_SWITCH := 0
# Include lib/libc in the final image
OVERRIDE_LIBC := 0
# Build PL011 UART driver in minimal generic UART mode
PL011_GENERIC_UART := 0
......
......@@ -16,7 +16,7 @@
#include <string.h>
#include <tegra_def.h>
#define BPMP_TIMEOUT 2
#define BPMP_TIMEOUT 500 /* 500ms */
static uint32_t channel_base[NR_CHANNELS];
static uint32_t bpmp_init_state = BPMP_INIT_PENDING;
......@@ -115,58 +115,117 @@ int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz,
int tegra_bpmp_init(void)
{
uint32_t val, base;
uint32_t val, base, timeout = BPMP_TIMEOUT;
unsigned int ch;
int ret = 0;
if (bpmp_init_state != BPMP_INIT_COMPLETE) {
if (bpmp_init_state == BPMP_INIT_PENDING) {
/* check if the bpmp processor is alive. */
val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
if (val != SIGN_OF_LIFE) {
ERROR("BPMP precessor not available\n");
return -ENOTSUP;
}
do {
val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
if (val != SIGN_OF_LIFE) {
mdelay(1);
timeout--;
}
/* check if clock for the atomics block is enabled */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_ENB_V);
if ((val & CAR_ENABLE_ATOMICS) == 0) {
ERROR("Clock to the atomics block is disabled\n");
}
} while ((val != SIGN_OF_LIFE) && (timeout > 0U));
/* check if the atomics block is out of reset */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_CLR_V);
if ((val & CAR_ENABLE_ATOMICS) == CAR_ENABLE_ATOMICS) {
ERROR("Reset to the atomics block is asserted\n");
}
if (val == SIGN_OF_LIFE) {
/* base address to get the result from Atomics */
base = TEGRA_ATOMICS_BASE + RESULT0_REG_OFFSET;
/* check if clock for the atomics block is enabled */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_ENB_V);
if ((val & CAR_ENABLE_ATOMICS) == 0) {
ERROR("Clock to the atomics block is disabled\n");
}
/* check if the atomics block is out of reset */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_CLR_V);
if ((val & CAR_ENABLE_ATOMICS) == CAR_ENABLE_ATOMICS) {
ERROR("Reset to the atomics block is asserted\n");
}
/* channel area is setup by BPMP before signaling handshake */
for (ch = 0; ch < NR_CHANNELS; ch++) {
/* base address to get the result from Atomics */
base = TEGRA_ATOMICS_BASE + RESULT0_REG_OFFSET;
/* issue command to get the channel base address */
mmio_write_32(base, (ch << TRIGGER_ID_SHIFT) |
ATOMIC_CMD_GET);
/* channel area is setup by BPMP before signaling handshake */
for (ch = 0; ch < NR_CHANNELS; ch++) {
/* get the base address for the channel */
channel_base[ch] = mmio_read_32(base);
/* issue command to get the channel base address */
mmio_write_32(base, (ch << TRIGGER_ID_SHIFT) |
ATOMIC_CMD_GET);
/* increment result register offset */
base += 4U;
/* get the base address for the channel */
channel_base[ch] = mmio_read_32(base);
/* increment result register offset */
base += 4U;
}
/* mark state as "initialized" */
bpmp_init_state = BPMP_INIT_COMPLETE;
/* the channel values have to be visible across all cpus */
flush_dcache_range((uint64_t)channel_base,
sizeof(channel_base));
flush_dcache_range((uint64_t)&bpmp_init_state,
sizeof(bpmp_init_state));
INFO("%s: done\n", __func__);
} else {
ERROR("BPMP not powered on\n");
/* bpmp is not present in the system */
bpmp_init_state = BPMP_NOT_PRESENT;
/* communication timed out */
ret = -ETIMEDOUT;
}
}
/* mark state as "initialized" */
bpmp_init_state = BPMP_INIT_COMPLETE;
return ret;
}
/* the channel values have to be visible across all cpus */
flush_dcache_range((uint64_t)channel_base, sizeof(channel_base));
void tegra_bpmp_suspend(void)
{
/* freeze the interface */
if (bpmp_init_state == BPMP_INIT_COMPLETE) {
bpmp_init_state = BPMP_SUSPEND_ENTRY;
flush_dcache_range((uint64_t)&bpmp_init_state,
sizeof(bpmp_init_state));
INFO("%s: done\n", __func__);
}
}
return ret;
void tegra_bpmp_resume(void)
{
uint32_t val, timeout = 0;
if (bpmp_init_state == BPMP_SUSPEND_ENTRY) {
/* check if the bpmp processor is alive. */
do {
val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
if (val != SIGN_OF_LIFE) {
mdelay(1);
timeout++;
}
} while ((val != SIGN_OF_LIFE) && (timeout < BPMP_TIMEOUT));
if (val == SIGN_OF_LIFE) {
INFO("%s: BPMP took %d ms to resume\n", __func__, timeout);
/* mark state as "initialized" */
bpmp_init_state = BPMP_INIT_COMPLETE;
/* state has to be visible across all cpus */
flush_dcache_range((uint64_t)&bpmp_init_state,
sizeof(bpmp_init_state));
} else {
ERROR("BPMP not powered on\n");
}
}
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -302,3 +302,49 @@ int32_t tegra_bpmp_ipc_reset_module(uint32_t rst_id)
return ret;
}
int tegra_bpmp_ipc_enable_clock(uint32_t clk_id)
{
int ret;
struct mrq_clk_request req;
/* only SE clocks are supported */
if (clk_id != TEGRA_CLK_SE) {
return -ENOTSUP;
}
/* prepare the MRQ_CLK command */
req.cmd_and_id = make_mrq_clk_cmd(CMD_CLK_ENABLE, clk_id);
ret = tegra_bpmp_ipc_send_req_atomic(MRQ_CLK, &req, sizeof(req),
NULL, 0);
if (ret != 0) {
ERROR("%s: failed for module %d with error %d\n", __func__,
clk_id, ret);
}
return ret;
}
int tegra_bpmp_ipc_disable_clock(uint32_t clk_id)
{
int ret;
struct mrq_clk_request req;
/* only SE clocks are supported */
if (clk_id != TEGRA_CLK_SE) {
return -ENOTSUP;
}
/* prepare the MRQ_CLK command */
req.cmd_and_id = make_mrq_clk_cmd(CMD_CLK_DISABLE, clk_id);
ret = tegra_bpmp_ipc_send_req_atomic(MRQ_CLK, &req, sizeof(req),
NULL, 0);
if (ret != 0) {
ERROR("%s: failed for module %d with error %d\n", __func__,
clk_id, ret);
}
return ret;
}
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -11,10 +11,10 @@
* Flags used in IPC req
*/
#define FLAG_DO_ACK (U(1) << 0)
#define FLAG_RING_DOORBELL (U(1) << 1)
#define FLAG_RING_DOORBELL (U(1) << 1)
/* Bit 1 is designated for CCPlex in secure world */
#define HSP_MASTER_CCPLEX_BIT (U(1) << 1)
#define HSP_MASTER_CCPLEX_BIT (U(1) << 1)
/* Bit 19 is designated for BPMP in non-secure world */
#define HSP_MASTER_BPMP_BIT (U(1) << 19)
/* Timeout to receive response from BPMP is 1 sec */
......@@ -49,9 +49,10 @@ struct frame_data {
*/
/**
* MRQ code to issue a module reset command to BPMP
* MRQ command codes
*/
#define MRQ_RESET U(20)
#define MRQ_CLK U(22)
/**
* Reset sub-commands
......@@ -71,4 +72,56 @@ struct __attribute__((packed)) mrq_reset_request {
uint32_t reset_id;
};
/**
* MRQ_CLK sub-commands
*
*/
enum {
CMD_CLK_GET_RATE = 1,
CMD_CLK_SET_RATE = 2,
CMD_CLK_ROUND_RATE = 3,
CMD_CLK_GET_PARENT = 4,
CMD_CLK_SET_PARENT = 5,
CMD_CLK_IS_ENABLED = 6,
CMD_CLK_ENABLE = 7,
CMD_CLK_DISABLE = 8,
CMD_CLK_GET_ALL_INFO = 14,
CMD_CLK_GET_MAX_CLK_ID = 15,
CMD_CLK_MAX,
};
/**
* Used by the sender of an #MRQ_CLK message to control clocks. The
* clk_request is split into several sub-commands. Some sub-commands
* require no additional data. Others have a sub-command specific
* payload
*
* |sub-command |payload |
* |----------------------------|-----------------------|
* |CMD_CLK_GET_RATE |- |
* |CMD_CLK_SET_RATE |clk_set_rate |
* |CMD_CLK_ROUND_RATE |clk_round_rate |
* |CMD_CLK_GET_PARENT |- |
* |CMD_CLK_SET_PARENT |clk_set_parent |
* |CMD_CLK_IS_ENABLED |- |
* |CMD_CLK_ENABLE |- |
* |CMD_CLK_DISABLE |- |
* |CMD_CLK_GET_ALL_INFO |- |
* |CMD_CLK_GET_MAX_CLK_ID |- |
*
*/
struct mrq_clk_request {
/**
* sub-command and clock id concatenated into a 32-bit word.
* - bits[31..24] is the sub-cmd.
* - bits[23..0] is the clock id
*/
uint32_t cmd_and_id;
};
/**
* Macro to prepare the MRQ_CLK sub-command
*/
#define make_mrq_clk_cmd(cmd, id) (((cmd) << 24) | ((id) & 0xFFFFFF))
#endif /* INTF_H */
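For reference, the cmd_and_id packing above can be checked in isolation. The
following is a self-contained sketch (not part of the patch) that re-declares
the definitions it needs and prints the fields back out:

    #include <stdint.h>
    #include <stdio.h>

    /* local copies so the sketch builds on its own */
    #define CMD_CLK_ENABLE            7
    #define TEGRA_CLK_SE              124U
    #define make_mrq_clk_cmd(cmd, id) (((cmd) << 24) | ((id) & 0xFFFFFF))

    int main(void)
    {
        uint32_t cmd_and_id = make_mrq_clk_cmd(CMD_CLK_ENABLE, TEGRA_CLK_SE);

        /* bits[31..24] carry the sub-command, bits[23..0] the clock id */
        printf("sub-cmd=%u clk-id=%u\n",
               (unsigned int)(cmd_and_id >> 24),
               (unsigned int)(cmd_and_id & 0xFFFFFF));

        return 0;
    }

Running it prints "sub-cmd=7 clk-id=124", matching the values used by
tegra_bpmp_ipc_enable_clock() earlier in this change.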
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -15,6 +15,7 @@
#include <flowctrl.h>
#include <pmc.h>
#include <tegra_def.h>
#include <utils_def.h>
#define CLK_RST_DEV_L_SET 0x300
#define CLK_RST_DEV_L_CLR 0x304
......@@ -75,6 +76,47 @@ static void tegra_fc_prepare_suspend(int cpu_id, uint32_t csr)
tegra_fc_cpu_csr(cpu_id, val | csr);
}
/*******************************************************************************
* Lock the CCPLEX power gate exit. After this, no core can wake from C7
* until the lock is reverted. If a wake up event is asserted, the FC state
* machine will stall until the lock is reverted.
******************************************************************************/
void tegra_fc_ccplex_pgexit_lock(void)
{
unsigned int i, cpu = read_mpidr() & MPIDR_CPU_MASK;
uint32_t flags = tegra_fc_read_32(FLOWCTRL_FC_SEQ_INTERCEPT) & ~INTERCEPT_IRQ_PENDING;
uint32_t icept_cpu_flags[] = {
INTERCEPT_EXIT_PG_CORE0,
INTERCEPT_EXIT_PG_CORE1,
INTERCEPT_EXIT_PG_CORE2,
INTERCEPT_EXIT_PG_CORE3
};
/* set the intercept flags */
for (i = 0; i < ARRAY_SIZE(icept_cpu_flags); i++) {
/* skip current CPU */
if (i == cpu)
continue;
/* enable power gate exit intercept locks */
flags |= icept_cpu_flags[i];
}
tegra_fc_write_32(FLOWCTRL_FC_SEQ_INTERCEPT, flags);
(void)tegra_fc_read_32(FLOWCTRL_FC_SEQ_INTERCEPT);
}
/*******************************************************************************
* Revert the ccplex powergate exit locks
******************************************************************************/
void tegra_fc_ccplex_pgexit_unlock(void)
{
/* clear lock bits, clear pending interrupts */
tegra_fc_write_32(FLOWCTRL_FC_SEQ_INTERCEPT, INTERCEPT_IRQ_PENDING);
(void)tegra_fc_read_32(FLOWCTRL_FC_SEQ_INTERCEPT);
}
/*******************************************************************************
* Power down the current CPU
******************************************************************************/
......@@ -128,6 +170,31 @@ void tegra_fc_cluster_powerdn(uint32_t mpidr)
tegra_fc_prepare_suspend(cpu, val);
}
/*******************************************************************************
* Check if cluster idle or power down state is allowed from this CPU
******************************************************************************/
bool tegra_fc_is_ccx_allowed(void)
{
unsigned int i, cpu = read_mpidr() & MPIDR_CPU_MASK;
uint32_t val;
bool ccx_allowed = true;
for (i = 0; i < ARRAY_SIZE(flowctrl_offset_cpu_csr); i++) {
/* skip current CPU */
if (i == cpu)
continue;
/* check if all other CPUs are already halted */
val = mmio_read_32(flowctrl_offset_cpu_csr[i]);
if ((val & FLOWCTRL_CSR_HALT_MASK) == 0U) {
ccx_allowed = false;
}
}
return ccx_allowed;
}
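A hedged sketch of how a SoC power management handler might combine this
helper with tegra_pmc_is_last_on_cpu(), which is added to the PMC driver later
in this patch. The wrapper name and policy below are illustrative assumptions,
not code from the patch:

    #include <stdbool.h>
    #include <flowctrl.h>   /* tegra_fc_is_ccx_allowed() */
    #include <pmc.h>        /* tegra_pmc_is_last_on_cpu() */

    /*
     * Allow a cluster idle/power-down state only when every other CPU is
     * halted in the Flow Controller and power gated in the PMC.
     */
    static bool tegra_cluster_state_allowed(void)
    {
        return tegra_fc_is_ccx_allowed() && tegra_pmc_is_last_on_cpu();
    }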
/*******************************************************************************
* Suspend the entire SoC
******************************************************************************/
......@@ -190,22 +257,19 @@ void tegra_fc_lock_active_cluster(void)
}
/*******************************************************************************
* Reset BPMP processor
* Power ON BPMP processor
******************************************************************************/
void tegra_fc_reset_bpmp(void)
void tegra_fc_bpmp_on(uint32_t entrypoint)
{
uint32_t val;
/* halt BPMP */
tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, FLOWCTRL_WAITEVENT);
/* Assert BPMP reset */
mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_SET, CLK_BPMP_RST);
/* Restore reset address (stored in PMC_SCRATCH39) */
val = tegra_pmc_read_32(PMC_SCRATCH39);
mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, val);
while (val != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
/* Set reset address (stored in PMC_SCRATCH39) */
mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, entrypoint);
while (entrypoint != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
; /* wait till value reaches EVP_BPMP_RESET_VECTOR */
/* Wait for 2us before de-asserting the reset signal. */
......@@ -217,3 +281,42 @@ void tegra_fc_reset_bpmp(void)
/* Un-halt BPMP */
tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, 0);
}
/*******************************************************************************
* Power OFF BPMP processor
******************************************************************************/
void tegra_fc_bpmp_off(void)
{
/* halt BPMP */
tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, FLOWCTRL_WAITEVENT);
/* Assert BPMP reset */
mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_SET, CLK_BPMP_RST);
/* Clear reset address */
mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, 0);
while (0 != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
; /* wait till value reaches EVP_BPMP_RESET_VECTOR */
}
/*******************************************************************************
* Route legacy FIQ to the GICD
******************************************************************************/
void tegra_fc_enable_fiq_to_ccplex_routing(void)
{
uint32_t val = tegra_fc_read_32(FLOW_CTLR_FLOW_DBG_QUAL);
/* set the bit to pass FIQs to the GICD */
tegra_fc_write_32(FLOW_CTLR_FLOW_DBG_QUAL, val | FLOWCTRL_FIQ2CCPLEX_ENABLE);
}
/*******************************************************************************
* Disable routing legacy FIQ to the GICD
******************************************************************************/
void tegra_fc_disable_fiq_to_ccplex_routing(void)
{
uint32_t val = tegra_fc_read_32(FLOW_CTLR_FLOW_DBG_QUAL);
/* clear the bit to pass FIQs to the GICD */
tegra_fc_write_32(FLOW_CTLR_FLOW_DBG_QUAL, val & ~FLOWCTRL_FIQ2CCPLEX_ENABLE);
}
......@@ -17,7 +17,7 @@
/* Module IDs used during power ungate procedure */
static const uint32_t pmc_cpu_powergate_id[4] = {
0, /* CPU 0 */
14, /* CPU 0 */
9, /* CPU 1 */
10, /* CPU 2 */
11 /* CPU 3 */
......@@ -97,6 +97,45 @@ void tegra_pmc_lock_cpu_vectors(void)
tegra_pmc_write_32(PMC_SECURE_DISABLE3, val);
}
/*******************************************************************************
* Find out if this is the last standing CPU
******************************************************************************/
bool tegra_pmc_is_last_on_cpu(void)
{
int i, cpu = read_mpidr() & MPIDR_CPU_MASK;
uint32_t val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
bool status = true;
/* check if this is the last standing CPU */
for (i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER; i++) {
/* skip the current CPU */
if (i == cpu)
continue;
/* are other CPUs already power gated? */
if ((val & ((uint32_t)1 << pmc_cpu_powergate_id[i])) != 0U) {
status = false;
}
}
return status;
}
/*******************************************************************************
* Handler to be called on exiting System suspend. Right now only DPD registers
* are cleared.
******************************************************************************/
void tegra_pmc_resume(void)
{
/* Clear DPD sample */
mmio_write_32((TEGRA_PMC_BASE + PMC_IO_DPD_SAMPLE), 0x0);
/* Clear DPD Enable */
mmio_write_32((TEGRA_PMC_BASE + PMC_DPD_ENABLE_0), 0x0);
}
/*******************************************************************************
* Restart the system
******************************************************************************/
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -63,11 +63,27 @@ func console_core_putc
/* Check the input parameter */
cbz x1, putc_error
/* Prepend '\r' to '\n' */
cmp w0, #0xA
b.ne 2f
/* wait until spe is ready */
1: ldr w2, [x1]
and w2, w2, #CONSOLE_IS_BUSY
cbnz w2, 1b
/* spe is ready */
mov w2, #0xD /* '\r' */
and w2, w2, #0xFF
mov w3, #(CONSOLE_WRITE | (1 << CONSOLE_NUM_BYTES_SHIFT))
orr w2, w2, w3
str w2, [x1]
/* wait until spe is ready */
2: ldr w2, [x1]
and w2, w2, #CONSOLE_IS_BUSY
cbnz w2, 2b
/* spe is ready */
mov w2, w0
and w2, w2, #0xFF
......
......@@ -162,13 +162,15 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
}
/*
* Parse platform specific parameters - TZDRAM aperture base and size
* Parse platform specific parameters
*/
assert(plat_params != NULL);
plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
plat_bl31_params_from_bl2.l2_ecc_parity_prot_dis = plat_params->l2_ecc_parity_prot_dis;
plat_bl31_params_from_bl2.sc7entry_fw_size = plat_params->sc7entry_fw_size;
plat_bl31_params_from_bl2.sc7entry_fw_base = plat_params->sc7entry_fw_base;
/*
* It is very important that we run either from TZDRAM or TZSRAM base.
......@@ -404,6 +406,14 @@ void bl31_plat_arch_setup(void)
*/
boot_profiler_add_record("[TF] arch setup entry");
/* add MMIO space */
plat_mmio_map = plat_get_mmio_map();
if (plat_mmio_map != NULL) {
mmap_add(plat_mmio_map);
} else {
WARN("MMIO map not available\n");
}
/* add memory regions */
mmap_add_region(rw_start, rw_start,
rw_size,
......@@ -415,14 +425,6 @@ void bl31_plat_arch_setup(void)
code_size,
MT_CODE | MT_SECURE);
/* map TZDRAM used by BL31 as coherent memory */
if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
mmap_add_region(params_from_bl2->tzdram_base,
params_from_bl2->tzdram_base,
BL31_SIZE,
MT_DEVICE | MT_RW | MT_SECURE);
}
#if USE_COHERENT_MEM
coh_start = total_base + (BL_COHERENT_RAM_BASE - BL31_RO_BASE);
coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE;
......@@ -432,18 +434,12 @@ void bl31_plat_arch_setup(void)
(uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE);
#endif
/* map on-chip free running uS timer */
mmap_add_region(page_align(TEGRA_TMRUS_BASE, 0),
page_align(TEGRA_TMRUS_BASE, 0),
TEGRA_TMRUS_SIZE,
(uint8_t)MT_DEVICE | (uint8_t)MT_RO | (uint8_t)MT_SECURE);
/* add MMIO space */
plat_mmio_map = plat_get_mmio_map();
if (plat_mmio_map != NULL) {
mmap_add(plat_mmio_map);
} else {
WARN("MMIO map not available\n");
/* map TZDRAM used by BL31 as coherent memory */
if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
mmap_add_region(params_from_bl2->tzdram_base,
params_from_bl2->tzdram_base,
BL31_SIZE,
MT_DEVICE | MT_RW | MT_SECURE);
}
/* set up translation tables */
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -16,9 +16,15 @@
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
#if ENABLE_WDT_LEGACY_FIQ_HANDLING
#include <flowctrl.h>
#endif
#include <tegra_def.h>
#include <tegra_private.h>
/* Legacy FIQ used by earlier Tegra platforms */
#define LEGACY_FIQ_PPI_WDT 28U
static DEFINE_BAKERY_LOCK(tegra_fiq_lock);
/*******************************************************************************
......@@ -46,33 +52,58 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
(void)handle;
(void)cookie;
bakery_lock_get(&tegra_fiq_lock);
/*
* The FIQ was generated when the execution was in the non-secure
* world. Save the context registers to start with.
* Read the pending interrupt ID
*/
cm_el1_sysregs_context_save(NON_SECURE);
irq = plat_ic_get_pending_interrupt_id();
bakery_lock_get(&tegra_fiq_lock);
/*
* Save elr_el3 and spsr_el3 from the saved context, and overwrite
* the context with the NS fiq_handler_addr and SPSR value.
* Jump to NS world only if the NS world's FIQ handler has
* been registered
*/
fiq_state[cpu].elr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3));
fiq_state[cpu].spsr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_SPSR_EL3));
if (ns_fiq_handler_addr != 0U) {
/*
* The FIQ was generated when the execution was in the non-secure
* world. Save the context registers to start with.
*/
cm_el1_sysregs_context_save(NON_SECURE);
/*
* Save elr_el3 and spsr_el3 from the saved context, and overwrite
* the context with the NS fiq_handler_addr and SPSR value.
*/
fiq_state[cpu].elr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3));
fiq_state[cpu].spsr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_SPSR_EL3));
/*
* Set the new ELR to continue execution in the NS world using the
* FIQ handler registered earlier.
*/
cm_set_elr_el3(NON_SECURE, ns_fiq_handler_addr);
}
#if ENABLE_WDT_LEGACY_FIQ_HANDLING
/*
* Set the new ELR to continue execution in the NS world using the
* FIQ handler registered earlier.
* Tegra platforms that use LEGACY_FIQ as the watchdog timer FIQ
* need to issue an IPI to other CPUs, to allow them to handle
* the "system hung" scenario. This interrupt is passed to the GICD
* via the Flow Controller. So, once we receive this interrupt,
* disable the routing so that we can mark it as "complete" in the
* GIC later.
*/
assert(ns_fiq_handler_addr != 0ULL);
write_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3), (ns_fiq_handler_addr));
if (irq == LEGACY_FIQ_PPI_WDT) {
tegra_fc_disable_fiq_to_ccplex_routing();
}
#endif
/*
* Mark this interrupt as complete to avoid a FIQ storm.
*/
irq = plat_ic_acknowledge_interrupt();
if (irq < 1022U) {
(void)plat_ic_acknowledge_interrupt();
plat_ic_end_of_interrupt(irq);
}
......
/*
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -41,6 +41,7 @@ uint8_t tegra_fake_system_suspend;
* provide typical implementations that will be overridden by a SoC.
*/
#pragma weak tegra_soc_pwr_domain_suspend_pwrdown_early
#pragma weak tegra_soc_cpu_standby
#pragma weak tegra_soc_pwr_domain_suspend
#pragma weak tegra_soc_pwr_domain_on
#pragma weak tegra_soc_pwr_domain_off
......@@ -55,6 +56,12 @@ int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *tar
return PSCI_E_NOT_SUPPORTED;
}
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
(void)cpu_state;
return PSCI_E_SUCCESS;
}
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
(void)target_state;
......@@ -139,14 +146,37 @@ void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
******************************************************************************/
void tegra_cpu_standby(plat_local_state_t cpu_state)
{
u_register_t saved_scr_el3;
(void)cpu_state;
/* Tegra SoC specific handler */
if (tegra_soc_cpu_standby(cpu_state) != PSCI_E_SUCCESS)
ERROR("%s failed\n", __func__);
saved_scr_el3 = read_scr_el3();
/*
* As per ARM ARM D1.17.2, any physical IRQ interrupt received by the
* PE will be treated as a wake-up event, if SCR_EL3.IRQ is set to '1',
* irrespective of the value of the PSTATE.I bit value.
*/
write_scr_el3(saved_scr_el3 | SCR_IRQ_BIT);
/*
* Enter standby state
* dsb is good practice before using wfi to enter low power states
*
* dsb & isb is good practice before using wfi to enter low power states
*/
dsb();
isb();
wfi();
/*
* Restore the saved scr_el3, which has the IRQ bit cleared, as we don't want
* EL3 to handle any further interrupts
*/
write_scr_el3(saved_scr_el3);
}
/*******************************************************************************
......@@ -244,7 +274,7 @@ void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
/*
* Initialize the GIC cpu and distributor interfaces
*/
plat_gic_setup();
tegra_gic_init();
/*
* Check if we are exiting from deep sleep.
......
......@@ -116,6 +116,16 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid,
/* new video memory carveout settings */
tegra_memctrl_videomem_setup(x1, local_x2_32);
/*
* Ensure again that GPU is still in reset after VPR resize
*/
regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
TEGRA_GPU_RESET_REG_OFFSET);
if ((regval & GPU_RESET_BIT) == 0U) {
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_GPU_SET_OFFSET,
GPU_SET_BIT);
}
SMC_RET1(handle, 0);
/*
......
......@@ -27,8 +27,10 @@
#define SIGN_OF_LIFE 0xAAAAAAAAU
/* flags to indicate bpmp driver's state */
#define BPMP_NOT_PRESENT 0xF00DBEEFU
#define BPMP_INIT_COMPLETE 0xBEEFF00DU
#define BPMP_INIT_PENDING 0xDEADBEEFU
#define BPMP_SUSPEND_ENTRY 0xF00DCAFEU
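For clarity, the state machine these flags implement, as derived from the
driver changes in this patch, is:

    /*
     * BPMP_INIT_PENDING  -> BPMP_INIT_COMPLETE   tegra_bpmp_init(), handshake ok
     * BPMP_INIT_PENDING  -> BPMP_NOT_PRESENT     tegra_bpmp_init(), timeout
     * BPMP_INIT_COMPLETE -> BPMP_SUSPEND_ENTRY   tegra_bpmp_suspend()
     * BPMP_SUSPEND_ENTRY -> BPMP_INIT_COMPLETE   tegra_bpmp_resume()
     */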
/* requests serviced by the bpmp */
#define MRQ_PING 0
......@@ -106,6 +108,16 @@ typedef struct mb_data {
*/
int tegra_bpmp_init(void);
/**
* Function to suspend the interface with the bpmp
*/
void tegra_bpmp_suspend(void);
/**
* Function to resume the interface with the bpmp
*/
void tegra_bpmp_resume(void);
/**
* Handler to send a MRQ_* command to the bpmp
*/
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -17,6 +17,11 @@
#define TEGRA_RESET_ID_XUSB_PADCTL U(114)
#define TEGRA_RESET_ID_GPCDMA U(70)
/**
* Clock identifier for the SE device
*/
#define TEGRA_CLK_SE U(124)
/**
* Function to initialise the IPC with the bpmp
*/
......@@ -27,4 +32,16 @@ int32_t tegra_bpmp_ipc_init(void);
*/
int32_t tegra_bpmp_ipc_reset_module(uint32_t rst_id);
/**
* Handler to enable clock to a module. Only SE device is
* supported for now.
*/
int tegra_bpmp_ipc_enable_clock(uint32_t clk_id);
/**
* Handler to disable clock to a module. Only SE device is
* supported for now.
*/
int tegra_bpmp_ipc_disable_clock(uint32_t clk_id);
#endif /* __BPMP_IPC_H__ */
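A hedged usage sketch for the two clock handlers declared above. The wrapper
function and the surrounding error policy are assumptions made for
illustration; per this patch, only TEGRA_CLK_SE is accepted:

    #include <errno.h>
    #include <bpmp_ipc.h>   /* tegra_bpmp_ipc_*_clock(), TEGRA_CLK_SE */

    /* enable the SE clock, do some work, then disable it again */
    static int se_run_with_clock_enabled(void)
    {
        int ret = tegra_bpmp_ipc_enable_clock(TEGRA_CLK_SE);

        if (ret != 0) {
            /* -ENOTSUP for unsupported clock IDs, or an IPC error */
            return ret;
        }

        /* ... program the SE engine here ... */

        return tegra_bpmp_ipc_disable_clock(TEGRA_CLK_SE);
    }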
/*
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -11,7 +11,7 @@
#include <tegra_def.h>
#define FLOWCTRL_HALT_CPU0_EVENTS 0x0U
#define FLOWCTRL_HALT_CPU0_EVENTS (0x0U)
#define FLOWCTRL_WAITEVENT (2U << 29)
#define FLOWCTRL_WAIT_FOR_INTERRUPT (4U << 29)
#define FLOWCTRL_JTAG_RESUME (1U << 28)
......@@ -20,19 +20,46 @@
#define FLOWCTRL_HALT_LIC_FIQ (1U << 10)
#define FLOWCTRL_HALT_GIC_IRQ (1U << 9)
#define FLOWCTRL_HALT_GIC_FIQ (1U << 8)
#define FLOWCTRL_HALT_BPMP_EVENTS 0x4U
#define FLOWCTRL_CPU0_CSR 0x8U
#define FLOW_CTRL_CSR_PWR_OFF_STS (1U << 16)
#define FLOWCTRL_HALT_BPMP_EVENTS (0x4U)
#define FLOWCTRL_CPU0_CSR (0x8U)
#define FLOWCTRL_CSR_HALT_MASK (1U << 22)
#define FLOWCTRL_CSR_PWR_OFF_STS (1U << 16)
#define FLOWCTRL_CSR_INTR_FLAG (1U << 15)
#define FLOWCTRL_CSR_EVENT_FLAG (1U << 14)
#define FLOWCTRL_CSR_IMMEDIATE_WAKE (1U << 3)
#define FLOWCTRL_CSR_ENABLE (1U << 0)
#define FLOWCTRL_HALT_CPU1_EVENTS 0x14U
#define FLOWCTRL_CPU1_CSR 0x18U
#define FLOWCTRL_CC4_CORE0_CTRL 0x6cU
#define FLOWCTRL_WAIT_WFI_BITMAP 0x100U
#define FLOWCTRL_L2_FLUSH_CONTROL 0x94U
#define FLOWCTRL_BPMP_CLUSTER_CONTROL 0x98U
#define FLOWCTRL_HALT_CPU1_EVENTS (0x14U)
#define FLOWCTRL_CPU1_CSR (0x18U)
#define FLOW_CTLR_FLOW_DBG_QUAL (0x50U)
#define FLOWCTRL_FIQ2CCPLEX_ENABLE (1U << 28)
#define FLOWCTRL_FC_SEQ_INTERCEPT (0x5cU)
#define INTERCEPT_IRQ_PENDING (0xffU)
#define INTERCEPT_HVC (U(1) << 21)
#define INTERCEPT_ENTRY_CC4 (U(1) << 20)
#define INTERCEPT_ENTRY_PG_NONCPU (U(1) << 19)
#define INTERCEPT_EXIT_PG_NONCPU (U(1) << 18)
#define INTERCEPT_ENTRY_RG_CPU (U(1) << 17)
#define INTERCEPT_EXIT_RG_CPU (U(1) << 16)
#define INTERCEPT_ENTRY_PG_CORE0 (U(1) << 15)
#define INTERCEPT_EXIT_PG_CORE0 (U(1) << 14)
#define INTERCEPT_ENTRY_PG_CORE1 (U(1) << 13)
#define INTERCEPT_EXIT_PG_CORE1 (U(1) << 12)
#define INTERCEPT_ENTRY_PG_CORE2 (U(1) << 11)
#define INTERCEPT_EXIT_PG_CORE2 (U(1) << 10)
#define INTERCEPT_ENTRY_PG_CORE3 (U(1) << 9)
#define INTERCEPT_EXIT_PG_CORE3 (U(1) << 8)
#define INTERRUPT_PENDING_NONCPU (U(1) << 7)
#define INTERRUPT_PENDING_CRAIL (U(1) << 6)
#define INTERRUPT_PENDING_CORE0 (U(1) << 5)
#define INTERRUPT_PENDING_CORE1 (U(1) << 4)
#define INTERRUPT_PENDING_CORE2 (U(1) << 3)
#define INTERRUPT_PENDING_CORE3 (U(1) << 2)
#define CC4_INTERRUPT_PENDING (U(1) << 1)
#define HVC_INTERRUPT_PENDING (U(1) << 0)
#define FLOWCTRL_CC4_CORE0_CTRL (0x6cU)
#define FLOWCTRL_WAIT_WFI_BITMAP (0x100U)
#define FLOWCTRL_L2_FLUSH_CONTROL (0x94U)
#define FLOWCTRL_BPMP_CLUSTER_CONTROL (0x98U)
#define FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK (1U << 2)
#define FLOWCTRL_ENABLE_EXT 12U
......@@ -50,13 +77,19 @@ static inline void tegra_fc_write_32(uint32_t off, uint32_t val)
mmio_write_32(TEGRA_FLOWCTRL_BASE + off, val);
}
void tegra_fc_bpmp_on(uint32_t entrypoint);
void tegra_fc_bpmp_off(void);
void tegra_fc_ccplex_pgexit_lock(void);
void tegra_fc_ccplex_pgexit_unlock(void);
void tegra_fc_cluster_idle(uint32_t midr);
void tegra_fc_cpu_powerdn(uint32_t mpidr);
void tegra_fc_cluster_powerdn(uint32_t midr);
void tegra_fc_soc_powerdn(uint32_t midr);
void tegra_fc_cpu_on(int cpu);
void tegra_fc_cpu_off(int cpu);
void tegra_fc_disable_fiq_to_ccplex_routing(void);
void tegra_fc_enable_fiq_to_ccplex_routing(void);
bool tegra_fc_is_ccx_allowed(void);
void tegra_fc_lock_active_cluster(void);
void tegra_fc_reset_bpmp(void);
void tegra_fc_soc_powerdn(uint32_t midr);
#endif /* FLOWCTRL_H */
/*
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -9,22 +9,37 @@
#include <lib/mmio.h>
#include <lib/utils_def.h>
#include <stdbool.h>
#include <tegra_def.h>
#define PMC_CONFIG U(0x0)
#define PMC_IO_DPD_SAMPLE U(0x20)
#define PMC_DPD_ENABLE_0 U(0x24)
#define PMC_PWRGATE_STATUS U(0x38)
#define PMC_PWRGATE_TOGGLE U(0x30)
#define PMC_SECURE_SCRATCH0 U(0xb0)
#define PMC_SECURE_SCRATCH5 U(0xc4)
#define PMC_CRYPTO_OP_0 U(0xf4)
#define PMC_TOGGLE_START U(0x100)
#define PMC_SCRATCH39 U(0x138)
#define PMC_SCRATCH41 U(0x140)
#define PMC_SECURE_SCRATCH6 U(0x224)
#define PMC_SECURE_SCRATCH7 U(0x228)
#define PMC_SECURE_DISABLE2 U(0x2c4)
#define PMC_SECURE_DISABLE2_WRITE22_ON (U(1) << 28)
#define PMC_SECURE_SCRATCH8 U(0x300)
#define PMC_SECURE_SCRATCH79 U(0x41c)
#define PMC_FUSE_CONTROL_0 U(0x450)
#define PMC_SECURE_SCRATCH22 U(0x338)
#define PMC_SECURE_DISABLE3 U(0x2d8)
#define PMC_SECURE_DISABLE3_WRITE34_ON (U(1) << 20)
#define PMC_SECURE_DISABLE3_WRITE35_ON (U(1) << 22)
#define PMC_SECURE_SCRATCH34 U(0x368)
#define PMC_SECURE_SCRATCH35 U(0x36c)
#define PMC_SECURE_SCRATCH80 U(0xa98)
#define PMC_SECURE_SCRATCH119 U(0xb34)
#define PMC_SCRATCH201 U(0x844)
static inline uint32_t tegra_pmc_read_32(uint32_t off)
{
......@@ -36,9 +51,11 @@ static inline void tegra_pmc_write_32(uint32_t off, uint32_t val)
mmio_write_32(TEGRA_PMC_BASE + off, val);
}
void tegra_pmc_cpu_on(int32_t cpu);
void tegra_pmc_cpu_setup(uint64_t reset_addr);
bool tegra_pmc_is_last_on_cpu(void);
void tegra_pmc_lock_cpu_vectors(void);
void tegra_pmc_cpu_on(int32_t cpu);
void tegra_pmc_resume(void);
__dead2 void tegra_pmc_system_reset(void);
#endif /* PMC_H */
......@@ -618,9 +618,9 @@ typedef struct smmu_regs {
.val = 0x00000000U, \
}
#define smmu_make_gnsr0_sec_cfg(name) \
#define smmu_make_gnsr0_sec_cfg(base_addr, name) \
{ \
.reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_ ## name, \
.reg = base_addr + SMMU_GNSR0_ ## name, \
.val = 0x00000000U, \
}
......@@ -628,60 +628,199 @@ typedef struct smmu_regs {
* On ARM-SMMU, conditional offset to access secure aliases of non-secure registers
* is 0x400. So, add it to register address
*/
#define smmu_make_gnsr0_nsec_cfg(name) \
#define smmu_make_gnsr0_nsec_cfg(base_addr, name) \
{ \
.reg = TEGRA_SMMU0_BASE + 0x400U + SMMU_GNSR0_ ## name, \
.reg = base_addr + 0x400U + SMMU_GNSR0_ ## name, \
.val = 0x00000000U, \
}
#define smmu_make_gnsr0_smr_cfg(n) \
#define smmu_make_gnsr0_smr_cfg(base_addr, n) \
{ \
.reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_SMR ## n, \
.reg = base_addr + SMMU_GNSR0_SMR ## n, \
.val = 0x00000000U, \
}
#define smmu_make_gnsr0_s2cr_cfg(n) \
#define smmu_make_gnsr0_s2cr_cfg(base_addr, n) \
{ \
.reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_S2CR ## n, \
.reg = base_addr + SMMU_GNSR0_S2CR ## n, \
.val = 0x00000000U, \
}
#define smmu_make_gnsr1_cbar_cfg(n) \
#define smmu_make_gnsr1_cbar_cfg(base_addr, n) \
{ \
.reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \
.reg = base_addr + (1U << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \
.val = 0x00000000U, \
}
#define smmu_make_gnsr1_cba2r_cfg(n) \
#define smmu_make_gnsr1_cba2r_cfg(base_addr, n) \
{ \
.reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \
.reg = base_addr + (1U << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \
.val = 0x00000000U, \
}
#define make_smmu_cb_cfg(name, n) \
#define smmu_make_cb_cfg(base_addr, name, n) \
{ \
.reg = TEGRA_SMMU0_BASE + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \
.reg = base_addr + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \
+ SMMU_CBn_ ## name, \
.val = 0x00000000U, \
}
#define smmu_make_smrg_group(n) \
smmu_make_gnsr0_smr_cfg(n), \
smmu_make_gnsr0_s2cr_cfg(n), \
smmu_make_gnsr1_cbar_cfg(n), \
smmu_make_gnsr1_cba2r_cfg(n) /* don't put "," here. */
#define smmu_make_smrg_group(base_addr, n) \
smmu_make_gnsr0_smr_cfg(base_addr, n), \
smmu_make_gnsr0_s2cr_cfg(base_addr, n), \
smmu_make_gnsr1_cbar_cfg(base_addr, n), \
smmu_make_gnsr1_cba2r_cfg(base_addr, n) /* don't put "," here. */
#define smmu_make_cb_group(n) \
make_smmu_cb_cfg(SCTLR, n), \
make_smmu_cb_cfg(TCR2, n), \
make_smmu_cb_cfg(TTBR0_LO, n), \
make_smmu_cb_cfg(TTBR0_HI, n), \
make_smmu_cb_cfg(TCR, n), \
make_smmu_cb_cfg(PRRR_MAIR0, n),\
make_smmu_cb_cfg(FSR, n), \
make_smmu_cb_cfg(FAR_LO, n), \
make_smmu_cb_cfg(FAR_HI, n), \
make_smmu_cb_cfg(FSYNR0, n) /* don't put "," here. */
#define smmu_make_cb_group(base_addr, n) \
smmu_make_cb_cfg(base_addr, SCTLR, n), \
smmu_make_cb_cfg(base_addr, TCR2, n), \
smmu_make_cb_cfg(base_addr, TTBR0_LO, n), \
smmu_make_cb_cfg(base_addr, TTBR0_HI, n), \
smmu_make_cb_cfg(base_addr, TCR, n), \
smmu_make_cb_cfg(base_addr, PRRR_MAIR0, n),\
smmu_make_cb_cfg(base_addr, FSR, n), \
smmu_make_cb_cfg(base_addr, FAR_LO, n), \
smmu_make_cb_cfg(base_addr, FAR_HI, n), \
smmu_make_cb_cfg(base_addr, FSYNR0, n) /* don't put "," here. */
#define smmu_make_cfg(base_addr) \
smmu_make_gnsr0_nsec_cfg(base_addr, CR0), \
smmu_make_gnsr0_sec_cfg(base_addr, IDR0), \
smmu_make_gnsr0_sec_cfg(base_addr, IDR1), \
smmu_make_gnsr0_sec_cfg(base_addr, IDR2), \
smmu_make_gnsr0_nsec_cfg(base_addr, GFSR), \
smmu_make_gnsr0_nsec_cfg(base_addr, GFSYNR0), \
smmu_make_gnsr0_nsec_cfg(base_addr, GFSYNR1), \
smmu_make_gnsr0_nsec_cfg(base_addr, TLBGSTATUS),\
smmu_make_gnsr0_nsec_cfg(base_addr, PIDR2), \
smmu_make_smrg_group(base_addr, 0), \
smmu_make_smrg_group(base_addr, 1), \
smmu_make_smrg_group(base_addr, 2), \
smmu_make_smrg_group(base_addr, 3), \
smmu_make_smrg_group(base_addr, 4), \
smmu_make_smrg_group(base_addr, 5), \
smmu_make_smrg_group(base_addr, 6), \
smmu_make_smrg_group(base_addr, 7), \
smmu_make_smrg_group(base_addr, 8), \
smmu_make_smrg_group(base_addr, 9), \
smmu_make_smrg_group(base_addr, 10), \
smmu_make_smrg_group(base_addr, 11), \
smmu_make_smrg_group(base_addr, 12), \
smmu_make_smrg_group(base_addr, 13), \
smmu_make_smrg_group(base_addr, 14), \
smmu_make_smrg_group(base_addr, 15), \
smmu_make_smrg_group(base_addr, 16), \
smmu_make_smrg_group(base_addr, 17), \
smmu_make_smrg_group(base_addr, 18), \
smmu_make_smrg_group(base_addr, 19), \
smmu_make_smrg_group(base_addr, 20), \
smmu_make_smrg_group(base_addr, 21), \
smmu_make_smrg_group(base_addr, 22), \
smmu_make_smrg_group(base_addr, 23), \
smmu_make_smrg_group(base_addr, 24), \
smmu_make_smrg_group(base_addr, 25), \
smmu_make_smrg_group(base_addr, 26), \
smmu_make_smrg_group(base_addr, 27), \
smmu_make_smrg_group(base_addr, 28), \
smmu_make_smrg_group(base_addr, 29), \
smmu_make_smrg_group(base_addr, 30), \
smmu_make_smrg_group(base_addr, 31), \
smmu_make_smrg_group(base_addr, 32), \
smmu_make_smrg_group(base_addr, 33), \
smmu_make_smrg_group(base_addr, 34), \
smmu_make_smrg_group(base_addr, 35), \
smmu_make_smrg_group(base_addr, 36), \
smmu_make_smrg_group(base_addr, 37), \
smmu_make_smrg_group(base_addr, 38), \
smmu_make_smrg_group(base_addr, 39), \
smmu_make_smrg_group(base_addr, 40), \
smmu_make_smrg_group(base_addr, 41), \
smmu_make_smrg_group(base_addr, 42), \
smmu_make_smrg_group(base_addr, 43), \
smmu_make_smrg_group(base_addr, 44), \
smmu_make_smrg_group(base_addr, 45), \
smmu_make_smrg_group(base_addr, 46), \
smmu_make_smrg_group(base_addr, 47), \
smmu_make_smrg_group(base_addr, 48), \
smmu_make_smrg_group(base_addr, 49), \
smmu_make_smrg_group(base_addr, 50), \
smmu_make_smrg_group(base_addr, 51), \
smmu_make_smrg_group(base_addr, 52), \
smmu_make_smrg_group(base_addr, 53), \
smmu_make_smrg_group(base_addr, 54), \
smmu_make_smrg_group(base_addr, 55), \
smmu_make_smrg_group(base_addr, 56), \
smmu_make_smrg_group(base_addr, 57), \
smmu_make_smrg_group(base_addr, 58), \
smmu_make_smrg_group(base_addr, 59), \
smmu_make_smrg_group(base_addr, 60), \
smmu_make_smrg_group(base_addr, 61), \
smmu_make_smrg_group(base_addr, 62), \
smmu_make_smrg_group(base_addr, 63), \
smmu_make_cb_group(base_addr, 0), \
smmu_make_cb_group(base_addr, 1), \
smmu_make_cb_group(base_addr, 2), \
smmu_make_cb_group(base_addr, 3), \
smmu_make_cb_group(base_addr, 4), \
smmu_make_cb_group(base_addr, 5), \
smmu_make_cb_group(base_addr, 6), \
smmu_make_cb_group(base_addr, 7), \
smmu_make_cb_group(base_addr, 8), \
smmu_make_cb_group(base_addr, 9), \
smmu_make_cb_group(base_addr, 10), \
smmu_make_cb_group(base_addr, 11), \
smmu_make_cb_group(base_addr, 12), \
smmu_make_cb_group(base_addr, 13), \
smmu_make_cb_group(base_addr, 14), \
smmu_make_cb_group(base_addr, 15), \
smmu_make_cb_group(base_addr, 16), \
smmu_make_cb_group(base_addr, 17), \
smmu_make_cb_group(base_addr, 18), \
smmu_make_cb_group(base_addr, 19), \
smmu_make_cb_group(base_addr, 20), \
smmu_make_cb_group(base_addr, 21), \
smmu_make_cb_group(base_addr, 22), \
smmu_make_cb_group(base_addr, 23), \
smmu_make_cb_group(base_addr, 24), \
smmu_make_cb_group(base_addr, 25), \
smmu_make_cb_group(base_addr, 26), \
smmu_make_cb_group(base_addr, 27), \
smmu_make_cb_group(base_addr, 28), \
smmu_make_cb_group(base_addr, 29), \
smmu_make_cb_group(base_addr, 30), \
smmu_make_cb_group(base_addr, 31), \
smmu_make_cb_group(base_addr, 32), \
smmu_make_cb_group(base_addr, 33), \
smmu_make_cb_group(base_addr, 34), \
smmu_make_cb_group(base_addr, 35), \
smmu_make_cb_group(base_addr, 36), \
smmu_make_cb_group(base_addr, 37), \
smmu_make_cb_group(base_addr, 38), \
smmu_make_cb_group(base_addr, 39), \
smmu_make_cb_group(base_addr, 40), \
smmu_make_cb_group(base_addr, 41), \
smmu_make_cb_group(base_addr, 42), \
smmu_make_cb_group(base_addr, 43), \
smmu_make_cb_group(base_addr, 44), \
smmu_make_cb_group(base_addr, 45), \
smmu_make_cb_group(base_addr, 46), \
smmu_make_cb_group(base_addr, 47), \
smmu_make_cb_group(base_addr, 48), \
smmu_make_cb_group(base_addr, 49), \
smmu_make_cb_group(base_addr, 50), \
smmu_make_cb_group(base_addr, 51), \
smmu_make_cb_group(base_addr, 52), \
smmu_make_cb_group(base_addr, 53), \
smmu_make_cb_group(base_addr, 54), \
smmu_make_cb_group(base_addr, 55), \
smmu_make_cb_group(base_addr, 56), \
smmu_make_cb_group(base_addr, 57), \
smmu_make_cb_group(base_addr, 58), \
smmu_make_cb_group(base_addr, 59), \
smmu_make_cb_group(base_addr, 60), \
smmu_make_cb_group(base_addr, 61), \
smmu_make_cb_group(base_addr, 62), \
smmu_make_cb_group(base_addr, 63) /* don't put "," here. */
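As an illustration of what the new base_addr parameter enables, a platform
with more than one SMMU instance could build its register list roughly as
below. This is a sketch only; the array name, the typedef name smmu_regs_t and
the second instance base TEGRA_SMMU1_BASE are assumptions, not part of this
patch:

    static smmu_regs_t tegra_smmu_context[] = {
        smmu_make_cfg(TEGRA_SMMU0_BASE),
        smmu_make_cfg(TEGRA_SMMU1_BASE),  /* hypothetical second instance */
    };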
#define smmu_bypass_cfg \
{ \
......