diff --git a/docs/plat/nvidia-tegra.rst b/docs/plat/nvidia-tegra.rst index 56dfacfc89d40493dd2ddcff126fb639e2280aa5..e244c1c99ae9b47ca67e9cb06187cc6704f0f6aa 100644 --- a/docs/plat/nvidia-tegra.rst +++ b/docs/plat/nvidia-tegra.rst @@ -80,6 +80,8 @@ uint64\_t tzdram\_size; uint64\_t tzdram\_base; /* UART port ID \*/ int uart\_id; +/* L2 ECC parity protection disable flag \*/ +int l2\_ecc\_parity\_prot\_dis; } plat\_params\_from\_bl2\_t; Power Management diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S index 0476ba8262364c7b17ce17ef52c7b2b9bc96a571..2bf9a225d9fed4a4af48eb99256cb3d6346b5978 100644 --- a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S +++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S @@ -11,6 +11,7 @@ #include <cortex_a57.h> #include <platform_def.h> #include <tegra_def.h> +#include <tegra_platform.h> #define MIDR_PN_CORTEX_A57 0xD07 @@ -45,7 +46,6 @@ .globl ns_image_entrypoint .globl tegra_bl31_phys_base .globl tegra_console_base - .globl tegra_enable_l2_ecc_parity_prot /* --------------------- * Common CPU init code @@ -92,20 +92,6 @@ msr actlr_el2, x0 isb - /* ------------------------------------------------------- - * Enable L2 ECC and Parity Protection - * ------------------------------------------------------- - */ - adr x0, tegra_enable_l2_ecc_parity_prot - ldr x0, [x0] - cbz x0, 1f - mrs x0, CORTEX_A57_L2CTLR_EL1 - and x1, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT - cbnz x1, 1f - orr x0, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT - msr CORTEX_A57_L2CTLR_EL1, x0 - isb - /* -------------------------------- * Enable the cycle count register * -------------------------------- @@ -326,6 +312,23 @@ func tegra_secure_entrypoint _align=6 #if ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT + /* -------------------------------------------------------- + * Skip the invalidate BTB workaround for Tegra210B01 SKUs. + * -------------------------------------------------------- + */ + mov x0, #TEGRA_MISC_BASE + add x0, x0, #HARDWARE_REVISION_OFFSET + ldr w1, [x0] + lsr w1, w1, #CHIP_ID_SHIFT + and w1, w1, #CHIP_ID_MASK + cmp w1, #TEGRA_CHIPID_TEGRA21 /* T210? */ + b.ne 2f + ldr w1, [x0] + lsr w1, w1, #MAJOR_VERSION_SHIFT + and w1, w1, #MAJOR_VERSION_MASK + cmp w1, #0x02 /* T210 B01? */ + b.eq 2f + /* ------------------------------------------------------- * Invalidate BTB along with I$ to remove any stale * entries from the branch predictor array. @@ -382,7 +385,7 @@ func tegra_secure_entrypoint _align=6 .rept 65 nop .endr - +2: /* -------------------------------------------------- * Do not insert instructions here * -------------------------------------------------- @@ -460,10 +463,3 @@ tegra_bl31_phys_base: */ tegra_console_base: .quad 0 - - /* -------------------------------------------------- - * Enable L2 ECC and Parity Protection - * -------------------------------------------------- - */ -tegra_enable_l2_ecc_parity_prot: - .quad 0 diff --git a/plat/nvidia/tegra/common/drivers/bpmp/bpmp.c b/plat/nvidia/tegra/common/drivers/bpmp/bpmp.c new file mode 100644 index 0000000000000000000000000000000000000000..96e3667d95ec80d2f448f46ee1b2f9547d94657b --- /dev/null +++ b/plat/nvidia/tegra/common/drivers/bpmp/bpmp.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <assert.h> +#include <bpmp.h> +#include <common/debug.h> +#include <delay_timer.h> +#include <errno.h> +#include <mmio.h> +#include <platform.h> +#include <stdbool.h> +#include <string.h> +#include <tegra_def.h> + +#define BPMP_TIMEOUT_10US 10 + +static uint32_t channel_base[NR_CHANNELS]; +static uint32_t bpmp_init_state = BPMP_INIT_PENDING; + +static uint32_t channel_field(unsigned int ch) +{ + return mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET) & CH_MASK(ch); +} + +static bool master_free(unsigned int ch) +{ + return channel_field(ch) == MA_FREE(ch); +} + +static bool master_acked(unsigned int ch) +{ + return channel_field(ch) == MA_ACKD(ch); +} + +static void signal_slave(unsigned int ch) +{ + mmio_write_32(TEGRA_RES_SEMA_BASE + CLR_OFFSET, CH_MASK(ch)); +} + +static void free_master(unsigned int ch) +{ + mmio_write_32(TEGRA_RES_SEMA_BASE + CLR_OFFSET, + MA_ACKD(ch) ^ MA_FREE(ch)); +} + +/* should be called with local irqs disabled */ +int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz, + void *ib_data, int ib_sz) +{ + unsigned int ch = (unsigned int)plat_my_core_pos(); + mb_data_t *p = (mb_data_t *)(uintptr_t)channel_base[ch]; + int32_t ret = -ETIMEDOUT, timeout = 0; + + if (bpmp_init_state == BPMP_INIT_COMPLETE) { + + /* loop until BPMP is free */ + for (timeout = 0; timeout < BPMP_TIMEOUT_10US; timeout++) { + if (master_free(ch) == true) { + break; + } + + udelay(1); + } + + if (timeout != BPMP_TIMEOUT_10US) { + + /* generate the command struct */ + p->code = mrq; + p->flags = DO_ACK; + (void)memcpy((void *)p->data, ob_data, (size_t)ob_sz); + + /* signal command ready to the BPMP */ + signal_slave(ch); + mmio_write_32(TEGRA_PRI_ICTLR_BASE + CPU_IEP_FIR_SET, + (1UL << INT_SHR_SEM_OUTBOX_FULL)); + + /* loop until the command is executed */ + for (timeout = 0; timeout < BPMP_TIMEOUT_10US; timeout++) { + if (master_acked(ch) == true) { + break; + } + + udelay(1); + } + + if (timeout != BPMP_TIMEOUT_10US) { + + /* get the command response */ + (void)memcpy(ib_data, (const void *)p->data, + (size_t)ib_sz); + + /* return error code */ + ret = p->code; + + /* free this channel */ + free_master(ch); + } + } + + } else { + /* return error code */ + ret = -EINVAL; + } + + if (timeout == BPMP_TIMEOUT_10US) { + ERROR("Timed out waiting for bpmp's response"); + } + + return ret; +} + +int tegra_bpmp_init(void) +{ + uint32_t val, base; + unsigned int ch; + int ret = 0; + + if (bpmp_init_state != BPMP_INIT_COMPLETE) { + + /* check if the bpmp processor is alive. 
*/
+		val = mmio_read_32(TEGRA_RES_SEMA_BASE + STA_OFFSET);
+		if (val != SIGN_OF_LIFE) {
+			ERROR("BPMP processor not available\n");
+			ret = -ENOTSUP;
+		}
+
+		/* check if clock for the atomics block is enabled */
+		val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_ENB_V);
+		if ((val & CAR_ENABLE_ATOMICS) == 0) {
+			ERROR("Clock to the atomics block is disabled\n");
+		}
+
+		/* check if the atomics block is out of reset */
+		val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEV_CLR_V);
+		if ((val & CAR_ENABLE_ATOMICS) == CAR_ENABLE_ATOMICS) {
+			ERROR("Reset to the atomics block is asserted\n");
+		}
+
+		/* base address to get the result from Atomics */
+		base = TEGRA_ATOMICS_BASE + RESULT0_REG_OFFSET;
+
+		/* channel area is setup by BPMP before signaling handshake */
+		for (ch = 0; ch < NR_CHANNELS; ch++) {
+
+			/* issue command to get the channel base address */
+			mmio_write_32(base, (ch << TRIGGER_ID_SHIFT) |
+				      ATOMIC_CMD_GET);
+
+			/* get the base address for the channel */
+			channel_base[ch] = mmio_read_32(base);
+
+			/* increment result register offset */
+			base += 4UL;
+		}
+
+		/* mark state as "initialized" */
+		if (ret == 0)
+			bpmp_init_state = BPMP_INIT_COMPLETE;
+
+		/* the channel values have to be visible across all cpus */
+		flush_dcache_range((uint64_t)channel_base, sizeof(channel_base));
+		flush_dcache_range((uint64_t)&bpmp_init_state,
+				   sizeof(bpmp_init_state));
+
+		INFO("%s: done\n", __func__);
+	}
+
+	return ret;
+}
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
index 55b9152232ca1258cdc5fc50a89e297069084658..de431b75b66aa9bb1c972fe7b63128e358a9381e 100644
--- a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
@@ -87,6 +87,9 @@ static void tegra_memctrl_reconfig_mss_clients(void)
 	 * strongly ordered MSS clients. ROC needs to be single point
 	 * of control on overriding the memory type. So, remove TSA's
 	 * memtype override.
+	 *
+	 * MC clients with default SO_DEV override still enabled at TSA:
+	 * AONW, BPMPW, SCEW, APEW
 	 */
 #if ENABLE_AFI_DEVICE
 	mc_set_tsa_passthrough(AFIW);
@@ -106,63 +109,121 @@ static void tegra_memctrl_reconfig_mss_clients(void)
 	mc_set_tsa_passthrough(AONDMAW);
 	mc_set_tsa_passthrough(SCEDMAW);
 
-	/*
-	 * Change COH_PATH_OVERRIDE_SO_DEV from NO_OVERRIDE -> FORCE_COHERENT
-	 * for boot and strongly ordered MSS clients. This steers all sodev
-	 * transactions to ROC.
+	/* Parker has no IO Coherency support and needs the following:
+	 * Ordered MC Clients on Parker are AFI, EQOS, SATA, XUSB.
+	 * ISO clients (DISP, VI, EQOS) should never snoop caches and
+	 * don't need ROC/PCFIFO ordering.
+	 * ISO clients (EQOS) that need ordering should use PCFIFO ordering
+	 * and bypass ROC ordering by using the FORCE_NON_COHERENT path.
+	 * FORCE_NON_COHERENT/FORCE_COHERENT configs take precedence
+	 * over SMMU attributes.
+	 * Force all Normal memory transactions from ISO and non-ISO to be
+	 * non-coherent (bypass ROC, avoid cache snoops to avoid a perf hit).
+	 * Force the SO_DEV transactions from ordered ISO clients (EQOS) to
+	 * the non-coherent path and enable the MC PCFIFO interlock for ordering.
+	 * Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
+	 * XUSB, SATA) to the coherent path so that the transactions are
+	 * ordered by ROC.
+	 * PCFIFO ensures write ordering.
+	 * Read after Write ordering is maintained/enforced by MC clients.
+	 * Clients that need PCIe type write ordering must
+	 * go through ROC ordering.
+ * Ordering enable for Read clients is not necessary. + * R5's and A9 would get necessary ordering from AXI and + * don't need ROC ordering enable: + * - MMIO ordering is through dev mapping and MMIO + * accesses bypass SMMU. + * - Normal memory is accessed through SMMU and ordering is + * ensured by client and AXI. + * - Ack point for Normal memory is WCAM in MC. + * - MMIO's can be early acked and AXI ensures dev memory ordering, + * Client ensures read/write direction change ordering. + * - See Bug 200312466 for more details. * - * Change AXID_OVERRIDE/AXID_OVERRIDE_SO_DEV only for some clients - * whose AXI IDs we know and trust. + * CGID_TAG_ADR is only present from T186 A02. As this code is common + * between A01 and A02, tegra_memctrl_set_overrides() programs + * CGID_TAG_ADR for the necessary clients on A02. */ - -#if ENABLE_AFI_DEVICE - /* Match AFIW */ - mc_set_forced_coherent_so_dev_cfg(AFIR); -#endif - + mc_set_txn_override(HDAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(BPMPW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(PTCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVDISPLAYR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(EQOSW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVJPGSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(ISPRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCWAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(VICSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(MPCOREW, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE); + mc_set_txn_override(GPUSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(AXISR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SCEDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(EQOSR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + /* See bug 200131110 comment #35*/ + mc_set_txn_override(APEDMAR, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVENCSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCRAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(VICSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(BPMPDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(VIW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCRAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(AXISW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(XUSB_DEVR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(UFSHCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + 
mc_set_txn_override(TSECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(GPUSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SATAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(XUSB_HOSTW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT); + mc_set_txn_override(TSECSWRB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(GPUSRD2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SCEDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(GPUSWR2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(AONDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + /* See bug 200131110 comment #35*/ + mc_set_txn_override(APEDMAW, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(AONW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(HOST1XDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(ETRR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SESWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVJPGSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVDECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(TSECSRDB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(BPMPDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(APER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVDECSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(XUSB_HOSTR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(ISPWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SESRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SCER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(AONR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(MPCORER, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE); + mc_set_txn_override(SDMMCWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(HDAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVDECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(UFSHCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(AONDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SATAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT); + mc_set_txn_override(ETRW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(VICSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVENCSWR, 
CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + /* See bug 200131110 comment #35 */ + mc_set_txn_override(AFIR, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCWAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(NVDISPLAYR1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(ISPWB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(BPMPR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(APEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(SDMMCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); + mc_set_txn_override(XUSB_DEVW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT); + mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); /* * See bug 200131110 comment #35 - there are no normal requests * and AWID for SO/DEV requests is hardcoded in RTL for a * particular PCIE controller */ -#if ENABLE_AFI_DEVICE - mc_set_forced_coherent_so_dev_cfg(AFIW); -#endif - mc_set_forced_coherent_cfg(HDAR); - mc_set_forced_coherent_cfg(HDAW); - mc_set_forced_coherent_cfg(SATAR); - mc_set_forced_coherent_cfg(SATAW); - mc_set_forced_coherent_cfg(XUSB_HOSTR); - mc_set_forced_coherent_cfg(XUSB_HOSTW); - mc_set_forced_coherent_cfg(XUSB_DEVR); - mc_set_forced_coherent_cfg(XUSB_DEVW); - mc_set_forced_coherent_cfg(SDMMCRAB); - mc_set_forced_coherent_cfg(SDMMCWAB); - - /* Match APEDMAW */ - mc_set_forced_coherent_axid_so_dev_cfg(APEDMAR); - - /* - * See bug 200131110 comment #35 - AWID for normal requests - * is 0x80 and AWID for SO/DEV requests is 0x01 - */ - mc_set_forced_coherent_axid_so_dev_cfg(APEDMAW); - mc_set_forced_coherent_cfg(SESRD); - mc_set_forced_coherent_cfg(SESWR); - mc_set_forced_coherent_cfg(ETRR); - mc_set_forced_coherent_cfg(ETRW); - mc_set_forced_coherent_cfg(AXISR); - mc_set_forced_coherent_cfg(AXISW); - mc_set_forced_coherent_cfg(EQOSR); - mc_set_forced_coherent_cfg(EQOSW); - mc_set_forced_coherent_cfg(UFSHCR); - mc_set_forced_coherent_cfg(UFSHCW); - mc_set_forced_coherent_cfg(BPMPDMAR); - mc_set_forced_coherent_cfg(BPMPDMAW); - mc_set_forced_coherent_cfg(AONDMAR); - mc_set_forced_coherent_cfg(AONDMAW); - mc_set_forced_coherent_cfg(SCEDMAR); - mc_set_forced_coherent_cfg(SCEDMAW); + mc_set_txn_override(AFIW, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_COHERENT); + mc_set_txn_override(SCEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT); /* * At this point, ordering can occur at ROC. So, remove PCFIFO's @@ -192,56 +253,18 @@ static void tegra_memctrl_reconfig_mss_clients(void) mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) & mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) & mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) & - mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW) & mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) & mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) & mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) & mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW); + /* EQOSW is the only client that has PCFIFO order enabled. 
*/ + val |= mc_set_pcfifo_ordered_boot_so_mss(4, EQOSW); tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val); val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL & mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW); tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val); - /* - * At this point, ordering can occur at ROC. SMMU need not - * reorder any requests. - * - * Change SMMU_*_ORDERED_CLIENT from ORDERED -> UNORDERED - * for boot and strongly ordered MSS clients - */ - val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL & -#if ENABLE_AFI_DEVICE - mc_set_smmu_unordered_boot_so_mss(1, AFIW) & -#endif - mc_set_smmu_unordered_boot_so_mss(1, HDAW) & - mc_set_smmu_unordered_boot_so_mss(1, SATAW); - tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val); - - val = MC_SMMU_CLIENT_CONFIG2_RESET_VAL & - mc_set_smmu_unordered_boot_so_mss(2, XUSB_HOSTW) & - mc_set_smmu_unordered_boot_so_mss(2, XUSB_DEVW); - tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG2, val); - - val = MC_SMMU_CLIENT_CONFIG3_RESET_VAL & - mc_set_smmu_unordered_boot_so_mss(3, SDMMCWAB); - tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG3, val); - - val = MC_SMMU_CLIENT_CONFIG4_RESET_VAL & - mc_set_smmu_unordered_boot_so_mss(4, SESWR) & - mc_set_smmu_unordered_boot_so_mss(4, ETRW) & - mc_set_smmu_unordered_boot_so_mss(4, AXISW) & - mc_set_smmu_unordered_boot_so_mss(4, EQOSW) & - mc_set_smmu_unordered_boot_so_mss(4, UFSHCW) & - mc_set_smmu_unordered_boot_so_mss(4, BPMPDMAW) & - mc_set_smmu_unordered_boot_so_mss(4, AONDMAW) & - mc_set_smmu_unordered_boot_so_mss(4, SCEDMAW); - tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG4, val); - - val = MC_SMMU_CLIENT_CONFIG5_RESET_VAL & - mc_set_smmu_unordered_boot_so_mss(5, APEDMAW); - tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG5, val); - /* * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS * clients to allow memory traffic from all clients to start passing diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c index b49665027165dfabb52e34cb3bbe6ed89786a071..0806307467cf5bce23c49b2c711b2ecb1d1901d8 100644 --- a/plat/nvidia/tegra/common/tegra_bl31_setup.c +++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c @@ -27,6 +27,7 @@ #include <memctrl.h> #include <tegra_def.h> +#include <tegra_platform.h> #include <tegra_private.h> /* length of Trusty's input parameters (in bytes) */ @@ -122,6 +123,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, plat_params_from_bl2_t *plat_params = (plat_params_from_bl2_t *)arg1; image_info_t bl32_img_info = { {0} }; uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end; + uint32_t console_clock; /* * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so @@ -155,6 +157,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base; plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size; plat_bl31_params_from_bl2.uart_id = plat_params->uart_id; + plat_bl31_params_from_bl2.l2_ecc_parity_prot_dis = plat_params->l2_ecc_parity_prot_dis; /* * It is very important that we run either from TZDRAM or TZSRAM base. @@ -164,6 +167,15 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, (TEGRA_TZRAM_BASE != BL31_BASE)) panic(); + /* + * Reference clock used by the FPGAs is a lot slower. 
+ */ + if (tegra_platform_is_fpga() == 1U) { + console_clock = TEGRA_BOOT_UART_CLK_13_MHZ; + } else { + console_clock = TEGRA_BOOT_UART_CLK_408_MHZ; + } + /* * Get the base address of the UART controller to be used for the * console @@ -174,8 +186,8 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, /* * Configure the UART port to be used as the console */ - console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ, - TEGRA_CONSOLE_BAUDRATE); + console_init(tegra_console_base, console_clock, + TEGRA_CONSOLE_BAUDRATE); } /* @@ -239,6 +251,11 @@ void plat_trusty_set_boot_args(aapcs64_params_t *args) args->arg0 = bl32_mem_size; args->arg1 = bl32_boot_params; args->arg2 = TRUSTY_PARAMS_LEN_BYTES; + + /* update EKS size */ + if (args->arg4 != 0U) { + args->arg2 = args->arg4; + } } #endif diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c index 0b663cef429fd67e5abb0eba73bcad5f061d7fa4..9a43f76956b99962d048c271940456054cf27479 100644 --- a/plat/nvidia/tegra/common/tegra_fiq_glue.c +++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c @@ -41,6 +41,11 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id, uint32_t cpu = plat_my_core_pos(); uint32_t irq; + (void)id; + (void)flags; + (void)handle; + (void)cookie; + bakery_lock_get(&tegra_fiq_lock); /* diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c index 10edf9229c423c14aa7ac0808019488765639cd8..72da12663ed2b7c849c38a42aed4fde3ddd42984 100644 --- a/plat/nvidia/tegra/common/tegra_platform.c +++ b/plat/nvidia/tegra/common/tegra_platform.c @@ -1,12 +1,12 @@ /* - * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ #include <arch_helpers.h> +#include <assert.h> #include <lib/mmio.h> - #include <tegra_def.h> #include <tegra_platform.h> #include <tegra_private.h> @@ -19,35 +19,33 @@ typedef enum tegra_platform { TEGRA_PLATFORM_QT, TEGRA_PLATFORM_FPGA, TEGRA_PLATFORM_EMULATION, + TEGRA_PLATFORM_LINSIM, + TEGRA_PLATFORM_UNIT_FPGA, + TEGRA_PLATFORM_VIRT_DEV_KIT, TEGRA_PLATFORM_MAX, } tegra_platform_t; /******************************************************************************* * Tegra macros defining all the SoC minor versions ******************************************************************************/ -#define TEGRA_MINOR_QT 0 -#define TEGRA_MINOR_FPGA 1 -#define TEGRA_MINOR_EMULATION_MIN 2 -#define TEGRA_MINOR_EMULATION_MAX 10 - -/******************************************************************************* - * Tegra major, minor version helper macros - ******************************************************************************/ -#define MAJOR_VERSION_SHIFT 0x4 -#define MAJOR_VERSION_MASK 0xF -#define MINOR_VERSION_SHIFT 0x10 -#define MINOR_VERSION_MASK 0xF -#define CHIP_ID_SHIFT 8 -#define CHIP_ID_MASK 0xFF +#define TEGRA_MINOR_QT U(0) +#define TEGRA_MINOR_FPGA U(1) +#define TEGRA_MINOR_ASIM_QT U(2) +#define TEGRA_MINOR_ASIM_LINSIM U(3) +#define TEGRA_MINOR_DSIM_ASIM_LINSIM U(4) +#define TEGRA_MINOR_UNIT_FPGA U(5) +#define TEGRA_MINOR_VIRT_DEV_KIT U(6) /******************************************************************************* - * Tegra chip ID values + * Tegra macros defining all the SoC pre_si_platform ******************************************************************************/ -typedef enum tegra_chipid { - TEGRA_CHIPID_TEGRA13 = 0x13, - TEGRA_CHIPID_TEGRA21 = 0x21, - TEGRA_CHIPID_TEGRA18 = 0x18, -} tegra_chipid_t; +#define TEGRA_PRE_SI_QT U(1) +#define TEGRA_PRE_SI_FPGA U(2) +#define TEGRA_PRE_SI_UNIT_FPGA U(3) +#define TEGRA_PRE_SI_ASIM_QT U(4) +#define TEGRA_PRE_SI_ASIM_LINSIM U(5) +#define TEGRA_PRE_SI_DSIM_ASIM_LINSIM U(6) +#define TEGRA_PRE_SI_VDK U(8) /* * Read the chip ID value @@ -73,25 +71,38 @@ uint32_t tegra_get_chipid_minor(void) return (tegra_get_chipid() >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK; } -uint8_t tegra_chipid_is_t132(void) +/* + * Read the chip's pre_si_platform valus from the chip ID value + */ +static uint32_t tegra_get_chipid_pre_si_platform(void) { - uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK; + return (tegra_get_chipid() >> PRE_SI_PLATFORM_SHIFT) & PRE_SI_PLATFORM_MASK; +} - return (chip_id == TEGRA_CHIPID_TEGRA13); +bool tegra_chipid_is_t132(void) +{ + uint32_t chip_id = ((tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK); + + return (chip_id == (uint32_t)TEGRA_CHIPID_TEGRA13); } -uint8_t tegra_chipid_is_t210(void) +bool tegra_chipid_is_t186(void) { uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK; - return (chip_id == TEGRA_CHIPID_TEGRA21); + return (chip_id == TEGRA_CHIPID_TEGRA18); } -uint8_t tegra_chipid_is_t186(void) +bool tegra_chipid_is_t210(void) { uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK; - return (chip_id == TEGRA_CHIPID_TEGRA18); + return (chip_id == (uint32_t)TEGRA_CHIPID_TEGRA21); +} + +bool tegra_chipid_is_t210_b01(void) +{ + return (tegra_chipid_is_t210() && (tegra_get_chipid_major() == 0x2UL)); } /* @@ -99,54 +110,152 @@ uint8_t tegra_chipid_is_t186(void) */ static tegra_platform_t tegra_get_platform(void) { - uint32_t major = tegra_get_chipid_major(); - uint32_t minor = tegra_get_chipid_minor(); + 
uint32_t major, minor, pre_si_platform; + tegra_platform_t ret; + + /* get the major/minor chip ID values */ + major = tegra_get_chipid_major(); + minor = tegra_get_chipid_minor(); + pre_si_platform = tegra_get_chipid_pre_si_platform(); - /* Actual silicon platforms have a non-zero major version */ - if (major > 0) - return TEGRA_PLATFORM_SILICON; + if (major == 0U) { + /* + * The minor version number is used by simulation platforms + */ + switch (minor) { + /* + * Cadence's QuickTurn emulation system is a Solaris-based + * chip emulation system + */ + case TEGRA_MINOR_QT: + case TEGRA_MINOR_ASIM_QT: + ret = TEGRA_PLATFORM_QT; + break; - /* - * The minor version number is used by simulation platforms - */ + /* + * FPGAs are used during early software/hardware development + */ + case TEGRA_MINOR_FPGA: + ret = TEGRA_PLATFORM_FPGA; + break; + /* + * Linsim is a reconfigurable, clock-driven, mixed RTL/cmodel + * simulation framework. + */ + case TEGRA_MINOR_ASIM_LINSIM: + case TEGRA_MINOR_DSIM_ASIM_LINSIM: + ret = TEGRA_PLATFORM_LINSIM; + break; - /* - * Cadence's QuickTurn emulation system is a Solaris-based - * chip emulation system - */ - if (minor == TEGRA_MINOR_QT) - return TEGRA_PLATFORM_QT; + /* + * Unit FPGAs run the actual hardware block IP on the FPGA with + * the other parts of the system using Linsim. + */ + case TEGRA_MINOR_UNIT_FPGA: + ret = TEGRA_PLATFORM_UNIT_FPGA; + break; + /* + * The Virtualizer Development Kit (VDK) is the standard chip + * development from Synopsis. + */ + case TEGRA_MINOR_VIRT_DEV_KIT: + ret = TEGRA_PLATFORM_VIRT_DEV_KIT; + break; - /* - * FPGAs are used during early software/hardware development - */ - if (minor == TEGRA_MINOR_FPGA) - return TEGRA_PLATFORM_FPGA; + default: + ret = TEGRA_PLATFORM_MAX; + break; + } - /* Minor version reserved for other emulation platforms */ - if ((minor > TEGRA_MINOR_FPGA) && (minor <= TEGRA_MINOR_EMULATION_MAX)) - return TEGRA_PLATFORM_EMULATION; + } else if (pre_si_platform > 0U) { - /* unsupported platform */ - return TEGRA_PLATFORM_MAX; + switch (pre_si_platform) { + /* + * Cadence's QuickTurn emulation system is a Solaris-based + * chip emulation system + */ + case TEGRA_PRE_SI_QT: + case TEGRA_PRE_SI_ASIM_QT: + ret = TEGRA_PLATFORM_QT; + break; + + /* + * FPGAs are used during early software/hardware development + */ + case TEGRA_PRE_SI_FPGA: + ret = TEGRA_PLATFORM_FPGA; + break; + /* + * Linsim is a reconfigurable, clock-driven, mixed RTL/cmodel + * simulation framework. + */ + case TEGRA_PRE_SI_ASIM_LINSIM: + case TEGRA_PRE_SI_DSIM_ASIM_LINSIM: + ret = TEGRA_PLATFORM_LINSIM; + break; + + /* + * Unit FPGAs run the actual hardware block IP on the FPGA with + * the other parts of the system using Linsim. + */ + case TEGRA_PRE_SI_UNIT_FPGA: + ret = TEGRA_PLATFORM_UNIT_FPGA; + break; + /* + * The Virtualizer Development Kit (VDK) is the standard chip + * development from Synopsis. + */ + case TEGRA_PRE_SI_VDK: + ret = TEGRA_PLATFORM_VIRT_DEV_KIT; + break; + + default: + ret = TEGRA_PLATFORM_MAX; + break; + } + + } else { + /* Actual silicon platforms have a non-zero major version */ + ret = TEGRA_PLATFORM_SILICON; + } + + return ret; } -uint8_t tegra_platform_is_silicon(void) +bool tegra_platform_is_silicon(void) { - return (tegra_get_platform() == TEGRA_PLATFORM_SILICON); + return ((tegra_get_platform() == TEGRA_PLATFORM_SILICON) ? 
true : false); } -uint8_t tegra_platform_is_qt(void) +bool tegra_platform_is_qt(void) { - return (tegra_get_platform() == TEGRA_PLATFORM_QT); + return ((tegra_get_platform() == TEGRA_PLATFORM_QT) ? true : false); } -uint8_t tegra_platform_is_fpga(void) +bool tegra_platform_is_linsim(void) { - return (tegra_get_platform() == TEGRA_PLATFORM_FPGA); + tegra_platform_t plat = tegra_get_platform(); + + return (((plat == TEGRA_PLATFORM_LINSIM) || + (plat == TEGRA_PLATFORM_UNIT_FPGA)) ? true : false); } -uint8_t tegra_platform_is_emulation(void) +bool tegra_platform_is_fpga(void) +{ + return ((tegra_get_platform() == TEGRA_PLATFORM_FPGA) ? true : false); +} + +bool tegra_platform_is_emulation(void) { return (tegra_get_platform() == TEGRA_PLATFORM_EMULATION); } + +bool tegra_platform_is_unit_fpga(void) +{ + return ((tegra_get_platform() == TEGRA_PLATFORM_UNIT_FPGA) ? true : false); +} + +bool tegra_platform_is_virt_dev_kit(void) +{ + return ((tegra_get_platform() == TEGRA_PLATFORM_VIRT_DEV_KIT) ? true : false); +} diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c index 8361ddd3d35602fcefadd3e07c78f67a75b464a8..ce44983b5f782da2083e03ea928fb2f7cb720bf4 100644 --- a/plat/nvidia/tegra/common/tegra_pm.c +++ b/plat/nvidia/tegra/common/tegra_pm.c @@ -21,6 +21,7 @@ #include <memctrl.h> #include <pmc.h> #include <tegra_def.h> +#include <tegra_platform.h> #include <tegra_private.h> extern uint64_t tegra_bl31_phys_base; @@ -222,6 +223,7 @@ __dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state) { plat_params_from_bl2_t *plat_params; + uint32_t console_clock; /* * Initialize the GIC cpu and distributor interfaces @@ -234,10 +236,19 @@ void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state) if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] == PSTATE_ID_SOC_POWERDN) { + /* + * Reference clock used by the FPGAs is a lot slower. 
+ */ + if (tegra_platform_is_fpga() == 1U) { + console_clock = TEGRA_BOOT_UART_CLK_13_MHZ; + } else { + console_clock = TEGRA_BOOT_UART_CLK_408_MHZ; + } + /* Initialize the runtime console */ if (tegra_console_base != (uint64_t)0) { - console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ, - TEGRA_CONSOLE_BAUDRATE); + console_init(tegra_console_base, console_clock, + TEGRA_CONSOLE_BAUDRATE); } /* diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c index e50d12faf89de0165c7a858fd73bdf4c0e9dbfdc..e7acecea44d3276e4930028c37eb1ee16964c91e 100644 --- a/plat/nvidia/tegra/common/tegra_sip_calls.c +++ b/plat/nvidia/tegra/common/tegra_sip_calls.c @@ -31,20 +31,29 @@ ******************************************************************************/ extern uint8_t tegra_fake_system_suspend; - /******************************************************************************* * SoC specific SiP handler ******************************************************************************/ #pragma weak plat_sip_handler -int plat_sip_handler(uint32_t smc_fid, +int32_t plat_sip_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, - void *cookie, + const void *cookie, void *handle, uint64_t flags) { + /* unused parameters */ + (void)smc_fid; + (void)x1; + (void)x2; + (void)x3; + (void)x4; + (void)cookie; + (void)handle; + (void)flags; + return -ENOTSUP; } @@ -61,112 +70,115 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid, u_register_t flags) { uint32_t regval; - int err; + int32_t err; /* Check if this is a SoC specific SiP */ err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags); - if (err == 0) - SMC_RET1(handle, (uint64_t)err); - - switch (smc_fid) { - - case TEGRA_SIP_NEW_VIDEOMEM_REGION: - - /* clean up the high bits */ - x2 = (uint32_t)x2; - - /* - * Check if Video Memory overlaps TZDRAM (contains bl31/bl32) - * or falls outside of the valid DRAM range - */ - err = bl31_check_ns_address(x1, x2); - if (err) - SMC_RET1(handle, err); - - /* - * Check if Video Memory is aligned to 1MB. - */ - if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) { - ERROR("Unaligned Video Memory base address!\n"); - SMC_RET1(handle, -ENOTSUP); - } - - /* - * The GPU is the user of the Video Memory region. In order to - * transition to the new memory region smoothly, we program the - * new base/size ONLY if the GPU is in reset mode. - */ - regval = mmio_read_32(TEGRA_CAR_RESET_BASE + - TEGRA_GPU_RESET_REG_OFFSET); - if ((regval & GPU_RESET_BIT) == 0U) { - ERROR("GPU not in reset! Video Memory setup failed\n"); - SMC_RET1(handle, -ENOTSUP); - } - - /* new video memory carveout settings */ - tegra_memctrl_videomem_setup(x1, x2); + if (err == 0) { - SMC_RET1(handle, 0); - break; + SMC_RET1(handle, (uint64_t)err); - /* - * The NS world registers the address of its handler to be - * used for processing the FIQ. This is normally used by the - * NS FIQ debugger driver to detect system hangs by programming - * a watchdog timer to fire a FIQ interrupt. - */ - case TEGRA_SIP_FIQ_NS_ENTRYPOINT: + } else { + + switch (smc_fid) { + + case TEGRA_SIP_NEW_VIDEOMEM_REGION: + + /* clean up the high bits */ + x2 = (uint32_t)x2; + + /* + * Check if Video Memory overlaps TZDRAM (contains bl31/bl32) + * or falls outside of the valid DRAM range + */ + err = bl31_check_ns_address(x1, x2); + if (err != 0) { + SMC_RET1(handle, (uint64_t)err); + } + + /* + * Check if Video Memory is aligned to 1MB. 
+ */ + if (((x1 & 0xFFFFFU) != 0U) || ((x2 & 0xFFFFFU) != 0U)) { + ERROR("Unaligned Video Memory base address!\n"); + SMC_RET1(handle, -ENOTSUP); + } + + /* + * The GPU is the user of the Video Memory region. In order to + * transition to the new memory region smoothly, we program the + * new base/size ONLY if the GPU is in reset mode. + */ + regval = mmio_read_32(TEGRA_CAR_RESET_BASE + + TEGRA_GPU_RESET_REG_OFFSET); + if ((regval & GPU_RESET_BIT) == 0UL) { + ERROR("GPU not in reset! Video Memory setup failed\n"); + SMC_RET1(handle, -ENOTSUP); + } + + /* new video memory carveout settings */ + tegra_memctrl_videomem_setup(x1, (uint32_t)x2); - if (!x1) - SMC_RET1(handle, SMC_UNK); + SMC_RET1(handle, 0); /* - * TODO: Check if x1 contains a valid DRAM address + * The NS world registers the address of its handler to be + * used for processing the FIQ. This is normally used by the + * NS FIQ debugger driver to detect system hangs by programming + * a watchdog timer to fire a FIQ interrupt. */ + case TEGRA_SIP_FIQ_NS_ENTRYPOINT: - /* store the NS world's entrypoint */ - tegra_fiq_set_ns_entrypoint(x1); - - SMC_RET1(handle, 0); - break; + if (x1 == 0U) { + SMC_RET1(handle, SMC_UNK); + } - /* - * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0 - * CPU context when the FIQ interrupt was triggered. This allows the - * NS world to understand the CPU state when the watchdog interrupt - * triggered. - */ - case TEGRA_SIP_FIQ_NS_GET_CONTEXT: + /* + * TODO: Check if x1 contains a valid DRAM address + */ - /* retrieve context registers when FIQ triggered */ - tegra_fiq_get_intr_context(); + /* store the NS world's entrypoint */ + tegra_fiq_set_ns_entrypoint(x1); - SMC_RET0(handle); - break; - - case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND: - /* - * System suspend fake mode is set if we are on VDK and we make - * a debug SIP call. This mode ensures that we excercise debug - * path instead of the regular code path to suit the pre-silicon - * platform needs. These include replacing the call to WFI by - * a warm reset request. - */ - if (tegra_platform_is_emulation() != 0U) { - - tegra_fake_system_suspend = 1; SMC_RET1(handle, 0); - } /* - * We return to the external world as if this SIP is not - * implemented in case, we are not running on VDK. + * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0 + * CPU context when the FIQ interrupt was triggered. This allows the + * NS world to understand the CPU state when the watchdog interrupt + * triggered. */ - break; - - default: - ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); - break; + case TEGRA_SIP_FIQ_NS_GET_CONTEXT: + + /* retrieve context registers when FIQ triggered */ + (void)tegra_fiq_get_intr_context(); + + SMC_RET0(handle); + + case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND: + /* + * System suspend fake mode is set if we are on VDK and we make + * a debug SIP call. This mode ensures that we excercise debug + * path instead of the regular code path to suit the pre-silicon + * platform needs. These include replacing the call to WFI by + * a warm reset request. + */ + if (tegra_platform_is_virt_dev_kit() != false) { + + tegra_fake_system_suspend = 1; + SMC_RET1(handle, 0); + } + + /* + * We return to the external world as if this SIP is not + * implemented in case, we are not running on VDK. 
+ */ + break; + + default: + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + break; + } } SMC_RET1(handle, SMC_UNK); @@ -176,9 +188,9 @@ uintptr_t tegra_sip_handler(uint32_t smc_fid, DECLARE_RT_SVC( tegra_sip_fast, - OEN_SIP_START, - OEN_SIP_END, - SMC_TYPE_FAST, - NULL, - tegra_sip_handler + (OEN_SIP_START), + (OEN_SIP_END), + (SMC_TYPE_FAST), + (NULL), + (tegra_sip_handler) ); diff --git a/plat/nvidia/tegra/common/tegra_topology.c b/plat/nvidia/tegra/common/tegra_topology.c index 893f28ff24137b6f5b8a434ab54301b485f6ecc3..4f6cf932e6cfad04cb2dbb2c9b5483633240c6f5 100644 --- a/plat/nvidia/tegra/common/tegra_topology.c +++ b/plat/nvidia/tegra/common/tegra_topology.c @@ -7,41 +7,38 @@ #include <platform_def.h> #include <arch.h> +#include <platform.h> #include <lib/psci/psci.h> -extern const unsigned char tegra_power_domain_tree_desc[]; #pragma weak plat_core_pos_by_mpidr -/******************************************************************************* - * This function returns the Tegra default topology tree information. - ******************************************************************************/ -const unsigned char *plat_get_power_domain_tree_desc(void) -{ - return tegra_power_domain_tree_desc; -} - /******************************************************************************* * This function implements a part of the critical interface between the psci * generic layer and the platform that allows the former to query the platform * to convert an MPIDR to a unique linear index. An error code (-1) is returned * in case the MPIDR is invalid. ******************************************************************************/ -int plat_core_pos_by_mpidr(u_register_t mpidr) +int32_t plat_core_pos_by_mpidr(u_register_t mpidr) { - unsigned int cluster_id, cpu_id; + u_register_t cluster_id, cpu_id; + int32_t result; + + cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK; + cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK; - cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; - cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; + result = (int32_t)cpu_id + ((int32_t)cluster_id * 4); - if (cluster_id >= PLATFORM_CLUSTER_COUNT) - return PSCI_E_NOT_PRESENT; + if (cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) { + result = PSCI_E_NOT_PRESENT; + } /* * Validate cpu_id by checking whether it represents a CPU in * one of the two clusters present on the platform. */ - if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) - return PSCI_E_NOT_PRESENT; + if (cpu_id >= (u_register_t)PLATFORM_MAX_CPUS_PER_CLUSTER) { + result = PSCI_E_NOT_PRESENT; + } - return (cpu_id + (cluster_id * 4)); + return result; } diff --git a/plat/nvidia/tegra/include/drivers/bpmp.h b/plat/nvidia/tegra/include/drivers/bpmp.h new file mode 100644 index 0000000000000000000000000000000000000000..27f57df4bedf691eb917a755f5b62bbdf4063435 --- /dev/null +++ b/plat/nvidia/tegra/include/drivers/bpmp.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef BPMP_H +#define BPMP_H + +#include <stdint.h> + +/* macro to enable clock to the Atomics block */ +#define CAR_ENABLE_ATOMICS (1UL << 16) + +/* command to get the channel base addresses from bpmp */ +#define ATOMIC_CMD_GET 4UL + +/* Hardware IRQ # used to signal bpmp of an incoming command */ +#define INT_SHR_SEM_OUTBOX_FULL 6UL + +/* macros to decode the bpmp's state */ +#define CH_MASK(ch) (0x3UL << ((ch) * 2UL)) +#define MA_FREE(ch) (0x2UL << ((ch) * 2UL)) +#define MA_ACKD(ch) (0x3UL << ((ch) * 2UL)) + +/* response from bpmp to indicate it has powered up */ +#define SIGN_OF_LIFE 0xAAAAAAAAUL + +/* flags to indicate bpmp driver's state */ +#define BPMP_INIT_COMPLETE 0xBEEFF00DUL +#define BPMP_INIT_PENDING 0xDEADBEEFUL + +/* requests serviced by the bpmp */ +#define MRQ_PING 0 +#define MRQ_QUERY_TAG 1 +#define MRQ_DO_IDLE 2 +#define MRQ_TOLERATE_IDLE 3 +#define MRQ_MODULE_LOAD 4 +#define MRQ_MODULE_UNLOAD 5 +#define MRQ_SWITCH_CLUSTER 6 +#define MRQ_TRACE_MODIFY 7 +#define MRQ_WRITE_TRACE 8 +#define MRQ_THREADED_PING 9 +#define MRQ_CPUIDLE_USAGE 10 +#define MRQ_MODULE_MAIL 11 +#define MRQ_SCX_ENABLE 12 +#define MRQ_BPMPIDLE_USAGE 14 +#define MRQ_HEAP_USAGE 15 +#define MRQ_SCLK_SKIP_SET_RATE 16 +#define MRQ_ENABLE_SUSPEND 17 +#define MRQ_PASR_MASK 18 +#define MRQ_DEBUGFS 19 +#define MRQ_THERMAL 27 + +/* Tegra PM states as known to BPMP */ +#define TEGRA_PM_CC1 9 +#define TEGRA_PM_CC4 12 +#define TEGRA_PM_CC6 14 +#define TEGRA_PM_CC7 15 +#define TEGRA_PM_SC1 17 +#define TEGRA_PM_SC2 18 +#define TEGRA_PM_SC3 19 +#define TEGRA_PM_SC4 20 +#define TEGRA_PM_SC7 23 + +/* flag to indicate if entry into a CCx power state is allowed */ +#define BPMP_CCx_ALLOWED 0UL + +/* number of communication channels to interact with the bpmp */ +#define NR_CHANNELS 4U + +/* flag to ask bpmp to acknowledge command packet */ +#define NO_ACK (0UL << 0UL) +#define DO_ACK (1UL << 0UL) + +/* size of the command/response data */ +#define MSG_DATA_MAX_SZ 120U + +/** + * command/response packet to/from the bpmp + * + * command + * ------- + * code: MRQ_* command + * flags: DO_ACK or NO_ACK + * data: + * [0] = cpu # + * [1] = cluster power state (TEGRA_PM_CCx) + * [2] = system power state (TEGRA_PM_SCx) + * + * response + * --------- + * code: error code + * flags: not used + * data: + * [0-3] = response value + */ +typedef struct mb_data { + int32_t code; + uint32_t flags; + uint8_t data[MSG_DATA_MAX_SZ]; +} mb_data_t; + +/** + * Function to initialise the interface with the bpmp + */ +int tegra_bpmp_init(void); + +/** + * Handler to send a MRQ_* command to the bpmp + */ +int32_t tegra_bpmp_send_receive_atomic(int mrq, const void *ob_data, int ob_sz, + void *ib_data, int ib_sz); + +#endif /* BPMP_H */ diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h index 957ff54c12fe9a28c69537c1135af7bd8db09620..ffe5269a8086db183ff840771a57397cb8634e66 100644 --- a/plat/nvidia/tegra/include/drivers/memctrl_v2.h +++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h @@ -100,6 +100,19 @@ ******************************************************************************/ #define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) (addr + sizeof(uint32_t)) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_SO_DEV (0UL << 4) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_SO_DEV (1UL << 4) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SO_DEV (2UL << 4) +#define 
MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_SO_DEV (3UL << 4) + +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_NO_OVERRIDE_NORMAL (0UL << 8) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_NON_COHERENT_NORMAL (1UL << 8) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_NORMAL (2UL << 8) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_FORCE_COHERENT_SNOOP_NORMAL (3UL << 8) + +#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_ZERO (0UL << 12) +#define MC_TXN_OVERRIDE_CONFIG_CGID_SO_DEV_CLIENT_AXI_ID (1UL << 12) + /******************************************************************************* * Memory Controller transaction override config registers ******************************************************************************/ @@ -312,98 +325,51 @@ typedef struct tegra_mc_settings { /******************************************************************************* * Memory Controller's PCFIFO client configuration registers ******************************************************************************/ -#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4 -#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000 -#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0 << 17) -#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1 << 17) -#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0 << 21) -#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1 << 21) -#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0 << 29) -#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1 << 29) - -#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8 -#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000 -#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0 << 11) -#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1 << 11) -#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0 << 13) -#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1 << 13) - -#define MC_PCFIFO_CLIENT_CONFIG3 0xddc -#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0 -#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0 << 7) -#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1 << 7) - -#define MC_PCFIFO_CLIENT_CONFIG4 0xde0 -#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0 -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0 << 1) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1 << 1) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0 << 5) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1 << 5) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0 << 13) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1 << 13) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0 << 15) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1 << 15) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0 << 17) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1 << 17) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0 << 22) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1 << 22) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0 << 26) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1 << 26) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0 << 30) -#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1 << 30) - -#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4 -#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0 -#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0 << 0) -#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1 << 0) - -/******************************************************************************* - * Memory Controller's SMMU client 
configuration registers - ******************************************************************************/ -#define MC_SMMU_CLIENT_CONFIG1 0x44 -#define MC_SMMU_CLIENT_CONFIG1_RESET_VAL 0x20000 -#define MC_SMMU_CLIENT_CONFIG1_AFIW_UNORDERED (0 << 17) -#define MC_SMMU_CLIENT_CONFIG1_AFIW_MASK (1 << 17) -#define MC_SMMU_CLIENT_CONFIG1_HDAW_UNORDERED (0 << 21) -#define MC_SMMU_CLIENT_CONFIG1_HDAW_MASK (1 << 21) -#define MC_SMMU_CLIENT_CONFIG1_SATAW_UNORDERED (0 << 29) -#define MC_SMMU_CLIENT_CONFIG1_SATAW_MASK (1 << 29) - -#define MC_SMMU_CLIENT_CONFIG2 0x48 -#define MC_SMMU_CLIENT_CONFIG2_RESET_VAL 0x20000 -#define MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_UNORDERED (0 << 11) -#define MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_MASK (1 << 11) -#define MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_UNORDERED (0 << 13) -#define MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_MASK (1 << 13) - -#define MC_SMMU_CLIENT_CONFIG3 0x4c -#define MC_SMMU_CLIENT_CONFIG3_RESET_VAL 0 -#define MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_UNORDERED (0 << 7) -#define MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_MASK (1 << 7) - -#define MC_SMMU_CLIENT_CONFIG4 0xb9c -#define MC_SMMU_CLIENT_CONFIG4_RESET_VAL 0 -#define MC_SMMU_CLIENT_CONFIG4_SESWR_UNORDERED (0 << 1) -#define MC_SMMU_CLIENT_CONFIG4_SESWR_MASK (1 << 1) -#define MC_SMMU_CLIENT_CONFIG4_ETRW_UNORDERED (0 << 5) -#define MC_SMMU_CLIENT_CONFIG4_ETRW_MASK (1 << 5) -#define MC_SMMU_CLIENT_CONFIG4_AXISW_UNORDERED (0 << 13) -#define MC_SMMU_CLIENT_CONFIG4_AXISW_MASK (1 << 13) -#define MC_SMMU_CLIENT_CONFIG4_EQOSW_UNORDERED (0 << 15) -#define MC_SMMU_CLIENT_CONFIG4_EQOSW_MASK (1 << 15) -#define MC_SMMU_CLIENT_CONFIG4_UFSHCW_UNORDERED (0 << 17) -#define MC_SMMU_CLIENT_CONFIG4_UFSHCW_MASK (1 << 17) -#define MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_UNORDERED (0 << 22) -#define MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_MASK (1 << 22) -#define MC_SMMU_CLIENT_CONFIG4_AONDMAW_UNORDERED (0 << 26) -#define MC_SMMU_CLIENT_CONFIG4_AONDMAW_MASK (1 << 26) -#define MC_SMMU_CLIENT_CONFIG4_SCEDMAW_UNORDERED (0 << 30) -#define MC_SMMU_CLIENT_CONFIG4_SCEDMAW_MASK (1 << 30) - -#define MC_SMMU_CLIENT_CONFIG5 0xbac -#define MC_SMMU_CLIENT_CONFIG5_RESET_VAL 0 -#define MC_SMMU_CLIENT_CONFIG5_APEDMAW_UNORDERED (0 << 0) -#define MC_SMMU_CLIENT_CONFIG5_APEDMAW_MASK (1 << 0) +#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4UL +#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000UL +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0UL << 17) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1UL << 17) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0UL << 21) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1UL << 21) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0UL << 29) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1UL << 29) + +#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8UL +#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000UL +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0UL << 11) +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1UL << 11) +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0UL << 13) +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1UL << 13) + +#define MC_PCFIFO_CLIENT_CONFIG3 0xddcUL +#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0UL +#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0UL << 7) +#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1UL << 7) + +#define MC_PCFIFO_CLIENT_CONFIG4 0xde0UL +#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0UL +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0UL << 1) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1UL << 1) 
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0UL << 5) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1UL << 5) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0UL << 13) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1UL << 13) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0UL << 15) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_ORDERED (1UL << 15) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1UL << 15) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0UL << 17) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1UL << 17) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0UL << 22) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1UL << 22) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0UL << 26) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1UL << 26) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0UL << 30) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1UL << 30) + +#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4UL +#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0UL +#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0UL << 0) +#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1UL << 0) #ifndef __ASSEMBLY__ @@ -433,9 +399,8 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val) (~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \ MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED) -#define mc_set_smmu_unordered_boot_so_mss(id, client) \ - (~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \ - MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED) +#define mc_set_pcfifo_ordered_boot_so_mss(id, client) \ + MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_ORDERED #define mc_set_tsa_passthrough(client) \ { \ @@ -445,25 +410,13 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val) TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \ } -#define mc_set_forced_coherent_cfg(client) \ - { \ - tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \ - MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV); \ - } - -#define mc_set_forced_coherent_so_dev_cfg(client) \ - { \ - tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \ - MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \ - MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \ - } - -#define mc_set_forced_coherent_axid_so_dev_cfg(client) \ +#define mc_set_txn_override(client, normal_axi_id, so_dev_axi_id, normal_override, so_dev_override) \ { \ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \ - MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \ - MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID | \ - MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \ + MC_TXN_OVERRIDE_##normal_axi_id | \ + MC_TXN_OVERRIDE_CONFIG_COH_PATH_##so_dev_override##_SO_DEV | \ + MC_TXN_OVERRIDE_CONFIG_COH_PATH_##normal_override##_NORMAL | \ + MC_TXN_OVERRIDE_CONFIG_CGID_##so_dev_axi_id); \ } /******************************************************************************* diff --git a/plat/nvidia/tegra/include/drivers/security_engine.h b/plat/nvidia/tegra/include/drivers/security_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..abfb21730266633681d15355bf436ff132442f33 --- /dev/null +++ b/plat/nvidia/tegra/include/drivers/security_engine.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SECURITY_ENGINE_H +#define SECURITY_ENGINE_H + +/******************************************************************************* + * Structure definition + ******************************************************************************/ + +/* Security Engine Linked List */ +struct tegra_se_ll { + /* DMA buffer address */ + uint32_t addr; + /* Data length in DMA buffer */ + uint32_t data_len; +}; + +#define SE_LL_MAX_BUFFER_NUM 4 +typedef struct tegra_se_io_lst { + volatile uint32_t last_buff_num; + volatile struct tegra_se_ll buffer[SE_LL_MAX_BUFFER_NUM]; +} tegra_se_io_lst_t __attribute__((aligned(4))); + +/* SE device structure */ +typedef struct tegra_se_dev { + /* Security Engine ID */ + const int se_num; + /* SE base address */ + const uint64_t se_base; + /* SE context size in AES blocks */ + const uint32_t ctx_size_blks; + /* pointer to source linked list buffer */ + tegra_se_io_lst_t *src_ll_buf; + /* pointer to destination linked list buffer */ + tegra_se_io_lst_t *dst_ll_buf; +} tegra_se_dev_t; + +/******************************************************************************* + * Public interface + ******************************************************************************/ +void tegra_se_init(void); +int tegra_se_suspend(void); +void tegra_se_resume(void); +int tegra_se_save_tzram(void); + +#endif /* SECURITY_ENGINE_H */ diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h index d10dc262aa91fa15066c3125c84a7c4f24f7d01c..0a0126b1e094d8232af8064fa8c55b5aa1b34177 100644 --- a/plat/nvidia/tegra/include/platform_def.h +++ b/plat/nvidia/tegra/include/platform_def.h @@ -34,7 +34,8 @@ * Platform console related constants ******************************************************************************/ #define TEGRA_CONSOLE_BAUDRATE U(115200) -#define TEGRA_BOOT_UART_CLK_IN_HZ U(408000000) +#define TEGRA_BOOT_UART_CLK_13_MHZ U(13000000) +#define TEGRA_BOOT_UART_CLK_408_MHZ U(408000000) /******************************************************************************* * Platform memory map related constants diff --git a/plat/nvidia/tegra/include/t210/tegra_def.h b/plat/nvidia/tegra/include/t210/tegra_def.h index 14cdfd59dbdbe09610f66b9683b34bae91777c66..8d71cae837a148876ad50bc138424397eca7fa00 100644 --- a/plat/nvidia/tegra/include/t210/tegra_def.h +++ b/plat/nvidia/tegra/include/t210/tegra_def.h @@ -32,6 +32,11 @@ #define PLAT_MAX_RET_STATE U(1) #define PLAT_MAX_OFF_STATE (PSTATE_ID_SOC_POWERDN + U(1)) +/******************************************************************************* + * iRAM memory constants + ******************************************************************************/ +#define TEGRA_IRAM_BASE 0x40000000 + /******************************************************************************* * GIC memory map ******************************************************************************/ @@ -55,6 +60,20 @@ ENABLE_WRAP_INCR_MASTER1_BIT | \ ENABLE_WRAP_INCR_MASTER0_BIT) +/******************************************************************************* + * Tegra Resource Semaphore constants + ******************************************************************************/ +#define TEGRA_RES_SEMA_BASE 0x60001000UL +#define STA_OFFSET 0UL +#define SET_OFFSET 4UL +#define CLR_OFFSET 8UL + +/******************************************************************************* + * Tegra Primary Interrupt Controller constants + 
******************************************************************************/ +#define TEGRA_PRI_ICTLR_BASE 0x60004000UL +#define CPU_IEP_FIR_SET 0x18UL + /******************************************************************************* * Tegra micro-seconds timer constants ******************************************************************************/ @@ -67,12 +86,19 @@ #define TEGRA_CAR_RESET_BASE U(0x60006000) #define TEGRA_GPU_RESET_REG_OFFSET U(0x28C) #define GPU_RESET_BIT (U(1) << 24) +#define TEGRA_RST_DEV_CLR_V U(0x434) +#define TEGRA_CLK_ENB_V U(0x440) /******************************************************************************* * Tegra Flow Controller constants ******************************************************************************/ #define TEGRA_FLOWCTRL_BASE U(0x60007000) +/******************************************************************************* + * Tegra AHB arbitration controller + ******************************************************************************/ +#define TEGRA_AHB_ARB_BASE 0x6000C000UL + /******************************************************************************* * Tegra Secure Boot Controller constants ******************************************************************************/ @@ -103,6 +129,15 @@ ******************************************************************************/ #define TEGRA_PMC_BASE U(0x7000E400) +/******************************************************************************* + * Tegra Atomics constants + ******************************************************************************/ +#define TEGRA_ATOMICS_BASE 0x70016000UL +#define TRIGGER0_REG_OFFSET 0UL +#define TRIGGER_WIDTH_SHIFT 4UL +#define TRIGGER_ID_SHIFT 16UL +#define RESULT0_REG_OFFSET 0xC00UL + /******************************************************************************* * Tegra Memory Controller constants ******************************************************************************/ @@ -118,6 +153,15 @@ #define MC_VIDEO_PROTECT_BASE_LO U(0x648) #define MC_VIDEO_PROTECT_SIZE_MB U(0x64c) +/******************************************************************************* + * Tegra SE constants + ******************************************************************************/ +#define TEGRA_SE1_BASE U(0x70012000) +#define TEGRA_SE2_BASE U(0x70412000) +#define TEGRA_PKA1_BASE U(0x70420000) +#define TEGRA_SE2_RANGE_SIZE U(0x2000) +#define SE_TZRAM_SECURITY U(0x4) + /******************************************************************************* * Tegra TZRAM constants ******************************************************************************/ diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h index 63a0e01e34a0191706abd9b35ff821cb5b356974..1e7ba165c619f32fe5d5d26771f3380b37ddddc8 100644 --- a/plat/nvidia/tegra/include/tegra_platform.h +++ b/plat/nvidia/tegra/include/tegra_platform.h @@ -8,27 +8,56 @@ #define TEGRA_PLATFORM_H #include <cdefs.h> +#include <stdbool.h> +#include <utils_def.h> + +/******************************************************************************* + * Tegra major, minor version helper macros + ******************************************************************************/ +#define MAJOR_VERSION_SHIFT U(0x4) +#define MAJOR_VERSION_MASK U(0xF) +#define MINOR_VERSION_SHIFT U(0x10) +#define MINOR_VERSION_MASK U(0xF) +#define CHIP_ID_SHIFT U(8) +#define CHIP_ID_MASK U(0xFF) +#define PRE_SI_PLATFORM_SHIFT U(0x14) +#define PRE_SI_PLATFORM_MASK U(0xF) + 
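The shift/mask helpers above simply isolate bit-fields of the raw chip ID register value; a minimal sketch of the intended decode, assuming the raw 32-bit register value has already been read elsewhere in the platform code (the helper names below are illustrative and not part of this patch):

    static inline uint32_t tegra_decode_chip_id(uint32_t chipid_reg)
    {
        /* chip ID lives in bits [15:8] of the register */
        return (chipid_reg >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
    }

    static inline uint32_t tegra_decode_major_rev(uint32_t chipid_reg)
    {
        /* major version lives in bits [7:4] */
        return (chipid_reg >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
    }

The same pattern applies to the MINOR_VERSION_* fields (bits [19:16]) and the PRE_SI_PLATFORM_* fields (bits [23:20]).
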
+/******************************************************************************* + * Tegra chip ID values + ******************************************************************************/ +#define TEGRA_CHIPID_TEGRA13 U(0x13) +#define TEGRA_CHIPID_TEGRA21 U(0x21) +#define TEGRA_CHIPID_TEGRA18 U(0x18) + +#ifndef __ASSEMBLY__ /* - * Tegra chip major/minor version + * Tegra chip ID major/minor identifiers */ uint32_t tegra_get_chipid_major(void); uint32_t tegra_get_chipid_minor(void); /* - * Tegra chip identifiers + * Tegra chip ID identifiers */ -uint8_t tegra_chipid_is_t132(void); -uint8_t tegra_chipid_is_t210(void); -uint8_t tegra_chipid_is_t186(void); +bool tegra_chipid_is_t132(void); +bool tegra_chipid_is_t186(void); +bool tegra_chipid_is_t210(void); +bool tegra_chipid_is_t210_b01(void); /* * Tegra platform identifiers */ -uint8_t tegra_platform_is_silicon(void); -uint8_t tegra_platform_is_qt(void); -uint8_t tegra_platform_is_emulation(void); -uint8_t tegra_platform_is_fpga(void); +bool tegra_platform_is_silicon(void); +bool tegra_platform_is_qt(void); +bool tegra_platform_is_emulation(void); +bool tegra_platform_is_linsim(void); +bool tegra_platform_is_fpga(void); +bool tegra_platform_is_unit_fpga(void); +bool tegra_platform_is_virt_dev_kit(void); + +#endif /* __ASSEMBLY__ */ #endif /* TEGRA_PLATFORM_H */ diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h index 93223cc2413a09e80b4d12090f30514d7514821b..168292735b8ad653ca2e3e677d12c0046e68d53b 100644 --- a/plat/nvidia/tegra/include/tegra_private.h +++ b/plat/nvidia/tegra/include/tegra_private.h @@ -32,8 +32,15 @@ typedef struct plat_params_from_bl2 { uint64_t tzdram_base; /* UART port ID */ int uart_id; + /* L2 ECC parity protection disable flag */ + int l2_ecc_parity_prot_dis; } plat_params_from_bl2_t; +/******************************************************************************* + * Helper function to access l2ctlr_el1 register on Cortex-A57 CPUs + ******************************************************************************/ +DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1) + /******************************************************************************* * Struct describing parameters passed to bl31 ******************************************************************************/ @@ -47,19 +54,19 @@ struct tegra_bl31_params { }; /* Declarations for plat_psci_handlers.c */ -int32_t tegra_soc_validate_power_state(unsigned int power_state, +int32_t tegra_soc_validate_power_state(uint32_t power_state, psci_power_state_t *req_state); /* Declarations for plat_setup.c */ const mmap_region_t *plat_get_mmio_map(void); -uint32_t plat_get_console_from_id(int id); +uint32_t plat_get_console_from_id(int32_t id); void plat_gic_setup(void); struct tegra_bl31_params *plat_get_bl31_params(void); plat_params_from_bl2_t *plat_get_bl31_plat_params(void); /* Declarations for plat_secondary.c */ void plat_secondary_setup(void); -int plat_lock_cpu_vectors(void); +int32_t plat_lock_cpu_vectors(void); /* Declarations for tegra_fiq_glue.c */ void tegra_fiq_handler_setup(void); @@ -92,4 +99,22 @@ void tegra_delay_timer_init(void); void tegra_secure_entrypoint(void); void tegra186_cpu_reset_handler(void); +/* Declarations for tegra_sip_calls.c */ +uintptr_t tegra_sip_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags); +int plat_sip_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + 
uint64_t x3, + uint64_t x4, + const void *cookie, + void *handle, + uint64_t flags); + #endif /* TEGRA_PRIVATE_H */ diff --git a/plat/nvidia/tegra/soc/t132/plat_setup.c b/plat/nvidia/tegra/soc/t132/plat_setup.c index f72b73ed52d4f2c495ca033b47b59dcbc175de4c..3f9cda965fa3c062d1645a148a9c02b335ed3329 100644 --- a/plat/nvidia/tegra/soc/t132/plat_setup.c +++ b/plat/nvidia/tegra/soc/t132/plat_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -7,25 +7,10 @@ #include <arch_helpers.h> #include <common/bl_common.h> #include <lib/xlat_tables/xlat_tables_v2.h> - +#include <platform.h> #include <tegra_def.h> #include <tegra_private.h> -/******************************************************************************* - * The Tegra power domain tree has a single system level power domain i.e. a - * single root node. The first entry in the power domain descriptor specifies - * the number of power domains at the highest power level. - ******************************************************************************* - */ -const unsigned char tegra_power_domain_tree_desc[] = { - /* No of root nodes */ - 1, - /* No of clusters */ - PLATFORM_CLUSTER_COUNT, - /* No of CPU cores */ - PLATFORM_CORE_COUNT, -}; - /* sets of MMIO ranges setup */ #define MMIO_RANGE_0_ADDR 0x50000000 #define MMIO_RANGE_1_ADDR 0x60000000 @@ -54,6 +39,29 @@ const mmap_region_t *plat_get_mmio_map(void) return tegra_mmap; } +/******************************************************************************* + * The Tegra power domain tree has a single system level power domain i.e. a + * single root node. The first entry in the power domain descriptor specifies + * the number of power domains at the highest power level. + ******************************************************************************* + */ +const unsigned char tegra_power_domain_tree_desc[] = { + /* No of root nodes */ + 1, + /* No of clusters */ + PLATFORM_CLUSTER_COUNT, + /* No of CPU cores */ + PLATFORM_CORE_COUNT, +}; + +/******************************************************************************* + * This function returns the Tegra default topology tree information. 
+ ******************************************************************************/ +const unsigned char *plat_get_power_domain_tree_desc(void) +{ + return tegra_power_domain_tree_desc; +} + unsigned int plat_get_syscnt_freq2(void) { return 12000000; diff --git a/plat/nvidia/tegra/soc/t132/plat_sip_calls.c b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c index 02dd1cd56ca670d2c4202f568d880da5b4c312fd..90c6bb2a1b5a3a232396cf298b752b01b4e9c0e1 100644 --- a/plat/nvidia/tegra/soc/t132/plat_sip_calls.c +++ b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c @@ -38,7 +38,7 @@ int plat_sip_handler(uint32_t smc_fid, uint64_t x2, uint64_t x3, uint64_t x4, - void *cookie, + const void *cookie, void *handle, uint64_t flags) { diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c index 1429a6158dfb82a49e04e6803007a8805ba492e6..599e46e74feeecc56fe595bf23dad45bc488dfa4 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c @@ -99,9 +99,9 @@ static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t r ret = 0; } else { /* For shutdown/reboot commands, we dont have to check for timeouts */ - if ((req == (uint32_t)TEGRA_ARI_MISC_CCPLEX) && - ((lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) || - (lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) { + if ((req == TEGRA_ARI_MISC_CCPLEX) && + ((lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) || + (lo == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) { ret = 0; } else { /* @@ -161,38 +161,38 @@ int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccp uint32_t system, uint8_t sys_state_force, uint32_t wake_mask, uint8_t update_wake_mask) { - uint32_t val = 0U; + uint64_t val = 0U; /* clean the previous response state */ ari_clobber_response(ari_base); /* update CLUSTER_CSTATE? */ if (cluster != 0U) { - val |= (cluster & (uint32_t)CLUSTER_CSTATE_MASK) | - (uint32_t)CLUSTER_CSTATE_UPDATE_BIT; + val |= (cluster & CLUSTER_CSTATE_MASK) | + CLUSTER_CSTATE_UPDATE_BIT; } /* update CCPLEX_CSTATE? */ if (ccplex != 0U) { - val |= ((ccplex & (uint32_t)CCPLEX_CSTATE_MASK) << (uint32_t)CCPLEX_CSTATE_SHIFT) | - (uint32_t)CCPLEX_CSTATE_UPDATE_BIT; + val |= ((ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) | + CCPLEX_CSTATE_UPDATE_BIT; } /* update SYSTEM_CSTATE? */ if (system != 0U) { - val |= ((system & (uint32_t)SYSTEM_CSTATE_MASK) << (uint32_t)SYSTEM_CSTATE_SHIFT) | - (((uint32_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) | - (uint32_t)SYSTEM_CSTATE_UPDATE_BIT); + val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) | + (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) | + SYSTEM_CSTATE_UPDATE_BIT); } /* update wake mask value? 
*/ if (update_wake_mask != 0U) { - val |= (uint32_t)CSTATE_WAKE_MASK_UPDATE_BIT; + val |= CSTATE_WAKE_MASK_UPDATE_BIT; } /* set the updated cstate info */ - return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO, val, - wake_mask); + return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO, + (uint32_t)val, wake_mask); } int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time) @@ -299,10 +299,8 @@ int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time int32_t ret, result; /* check for allowed power state */ - if ((state != TEGRA_ARI_CORE_C0) && - (state != TEGRA_ARI_CORE_C1) && - (state != TEGRA_ARI_CORE_C6) && - (state != TEGRA_ARI_CORE_C7)) { + if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) && + (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) { ERROR("%s: unknown cstate (%d)\n", __func__, state); result = EINVAL; } else { @@ -325,10 +323,10 @@ int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time int32_t ari_online_core(uint32_t ari_base, uint32_t core) { - uint64_t cpu = read_mpidr() & (uint64_t)(MPIDR_CPU_MASK); - uint64_t cluster = (read_mpidr() & (uint64_t)(MPIDR_CLUSTER_MASK)) >> - (uint64_t)(MPIDR_AFFINITY_BITS); - uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK; + uint64_t cpu = read_mpidr() & (MPIDR_CPU_MASK); + uint64_t cluster = (read_mpidr() & (MPIDR_CLUSTER_MASK)) >> + (MPIDR_AFFINITY_BITS); + uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; int32_t ret; /* construct the current CPU # */ @@ -342,8 +340,7 @@ int32_t ari_online_core(uint32_t ari_base, uint32_t core) /* * The Denver cluster has 2 CPUs only - 0, 1. */ - if ((impl == (uint32_t)DENVER_IMPL) && - ((core == 2U) || (core == 3U))) { + if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) { ERROR("%s: unknown core id (%d)\n", __func__, core); ret = EINVAL; } else { @@ -465,7 +462,7 @@ int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx) { int32_t ret = 0; /* sanity check GSC ID */ - if (gsc_idx > (uint32_t)TEGRA_ARI_GSC_VPR_IDX) { + if (gsc_idx > TEGRA_ARI_GSC_VPR_IDX) { ret = EINVAL; } else { /* clean the previous response state */ @@ -497,8 +494,8 @@ int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req, uint64_t *data) { int32_t ret, result; - uint32_t val; - uint8_t req_cmd, req_status; + uint32_t val, req_status; + uint8_t req_cmd; req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT); @@ -523,7 +520,7 @@ int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req, result = ret; } else { /* read the command status value */ - req_status = (uint8_t)ari_get_response_high(ari_base) & + req_status = ari_get_response_high(ari_base) & UNCORE_PERFMON_RESP_STATUS_MASK; /* diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c index 828ad3cbdd1757702fb19f3c3ab38e603454e735..e948e9903dacfc9f05bf65fb24b0d500cf21d715 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c @@ -111,8 +111,8 @@ static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = { static uint32_t mce_get_curr_cpu_ari_base(void) { uint64_t mpidr = read_mpidr(); - uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK; - uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK; + uint64_t cpuid = mpidr & MPIDR_CPU_MASK; + uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; /* * T186 has 2 
CPU clusters, one with Denver CPUs and the other with @@ -131,9 +131,9 @@ static uint32_t mce_get_curr_cpu_ari_base(void) static arch_mce_ops_t *mce_get_curr_cpu_ops(void) { uint64_t mpidr = read_mpidr(); - uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK; - uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & - (uint64_t)MIDR_IMPL_MASK; + uint64_t cpuid = mpidr & MPIDR_CPU_MASK; + uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & + MIDR_IMPL_MASK; /* * T186 has 2 CPU clusters, one with Denver CPUs and the other with @@ -172,9 +172,6 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, switch (cmd) { case MCE_CMD_ENTER_CSTATE: ret = ops->enter_cstate(cpu_ari_base, arg0, arg1); - if (ret < 0) { - ERROR("%s: enter_cstate failed(%d)\n", __func__, ret); - } break; @@ -183,30 +180,22 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, * get the parameters required for the update cstate info * command */ - arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4)); - arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5)); - arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6)); + arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4); + arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5); + arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6); ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0, (uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3, (uint32_t)arg4, (uint8_t)arg5); - if (ret < 0) { - ERROR("%s: update_cstate_info failed(%d)\n", - __func__, ret); - } - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0)); + write_ctx_reg(gp_regs, CTX_GPREG_X4, (0ULL)); + write_ctx_reg(gp_regs, CTX_GPREG_X5, (0ULL)); + write_ctx_reg(gp_regs, CTX_GPREG_X6, (0ULL)); break; case MCE_CMD_UPDATE_CROSSOVER_TIME: ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1); - if (ret < 0) { - ERROR("%s: update_crossover_time failed(%d)\n", - __func__, ret); - } break; @@ -214,61 +203,40 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, ret64 = ops->read_cstate_stats(cpu_ari_base, arg0); /* update context to return cstate stats value */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64)); break; case MCE_CMD_WRITE_CSTATE_STATS: ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1); - if (ret < 0) { - ERROR("%s: write_cstate_stats failed(%d)\n", - __func__, ret); - } break; case MCE_CMD_IS_CCX_ALLOWED: ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1); - if (ret < 0) { - ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret); - break; - } /* update context to return CCx status value */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), - (uint64_t)(ret)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret)); break; case MCE_CMD_IS_SC7_ALLOWED: ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1); - if (ret < 0) { - ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret); - break; - } /* update context to return SC7 status value */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), - (uint64_t)(ret)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), - (uint64_t)(ret)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret)); + write_ctx_reg(gp_regs, CTX_GPREG_X3, (uint64_t)(ret)); break; case MCE_CMD_ONLINE_CORE: ret = ops->online_core(cpu_ari_base, arg0); - 
if (ret < 0) { - ERROR("%s: online_core failed(%d)\n", __func__, ret); - } break; case MCE_CMD_CC3_CTRL: ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2); - if (ret < 0) { - ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret); - } break; @@ -277,10 +245,10 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, arg0); /* update context to return if echo'd data matched source */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), - ((ret64 == arg0) ? 1ULL : 0ULL)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), - ((ret64 == arg0) ? 1ULL : 0ULL)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, ((ret64 == arg0) ? + 1ULL : 0ULL)); + write_ctx_reg(gp_regs, CTX_GPREG_X2, ((ret64 == arg0) ? + 1ULL : 0ULL)); break; @@ -292,10 +260,8 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, * version = minor(63:32) | major(31:0). Update context * to return major and minor version number. */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), - (ret64)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), - (ret64 >> 32ULL)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64 >> 32ULL)); break; @@ -304,32 +270,22 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0); /* update context to return features value */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64)); break; case MCE_CMD_ROC_FLUSH_CACHE_TRBITS: ret = ops->roc_flush_cache_trbits(cpu_ari_base); - if (ret < 0) { - ERROR("%s: flush cache_trbits failed(%d)\n", __func__, - ret); - } break; case MCE_CMD_ROC_FLUSH_CACHE: ret = ops->roc_flush_cache(cpu_ari_base); - if (ret < 0) { - ERROR("%s: flush cache failed(%d)\n", __func__, ret); - } break; case MCE_CMD_ROC_CLEAN_CACHE: ret = ops->roc_clean_cache(cpu_ari_base); - if (ret < 0) { - ERROR("%s: clean cache failed(%d)\n", __func__, ret); - } break; @@ -337,9 +293,9 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1); /* update context to return MCA data/error */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X2, (arg1)); + write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64)); break; @@ -347,8 +303,8 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1); /* update context to return MCA error */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64)); - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64)); + write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64)); break; @@ -375,7 +331,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1, ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1); /* update context to return data */ - write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1)); + write_ctx_reg(gp_regs, CTX_GPREG_X1, (arg1)); break; case MCE_CMD_MISC_CCPLEX: diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c index 1ac3710daf5a348e4c732e21048dc4fbdaf30f13..44ee8fbf6811b36725a73a612345e2248dc7d6b7 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c +++ 
b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c @@ -200,15 +200,14 @@ int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time int32_t nvg_online_core(uint32_t ari_base, uint32_t core) { - uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK; - uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & - (uint64_t)MIDR_IMPL_MASK; + uint64_t cpu = read_mpidr() & MPIDR_CPU_MASK; + uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; int32_t ret = 0; (void)ari_base; /* sanity check code id */ - if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) { + if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) { ERROR("%s: unsupported core id (%d)\n", __func__, core); ret = EINVAL; } else { diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c index fb94bcedf97bdd3231e2918a26ed73eff36e8861..7a9ce287a06bb916a6161406efa08efecd1839fc 100644 --- a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c +++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c @@ -12,6 +12,7 @@ #include <common/bl_common.h> #include <common/debug.h> #include <context.h> +#include <cortex_a57.h> #include <denver.h> #include <lib/el3_runtime/context_mgmt.h> #include <lib/psci/psci.h> @@ -29,32 +30,33 @@ extern void tegra186_cpu_reset_handler(void); extern uint32_t __tegra186_cpu_reset_handler_end, __tegra186_smmu_context; +/* TZDRAM offset for saving SMMU context */ +#define TEGRA186_SMMU_CTX_OFFSET 16UL + /* state id mask */ -#define TEGRA186_STATE_ID_MASK 0xF +#define TEGRA186_STATE_ID_MASK 0xFU /* constants to get power state's wake time */ -#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0 -#define TEGRA186_WAKE_TIME_SHIFT 4 +#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0U +#define TEGRA186_WAKE_TIME_SHIFT 4U /* default core wake mask for CPU_SUSPEND */ -#define TEGRA186_CORE_WAKE_MASK 0x180c +#define TEGRA186_CORE_WAKE_MASK 0x180cU /* context size to save during system suspend */ -#define TEGRA186_SE_CONTEXT_SIZE 3 +#define TEGRA186_SE_CONTEXT_SIZE 3U static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE]; -static struct t18x_psci_percpu_data { - unsigned int wake_time; -} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT]; - -/* System power down state */ -uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF; +static struct tegra_psci_percpu_data { + uint32_t wake_time; +} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT]; -int32_t tegra_soc_validate_power_state(unsigned int power_state, +int32_t tegra_soc_validate_power_state(uint32_t power_state, psci_power_state_t *req_state) { - int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK; - int cpu = plat_my_core_pos(); + uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK; + uint32_t cpu = plat_my_core_pos(); + int32_t ret = PSCI_E_SUCCESS; /* save the core wake time (in TSC ticks)*/ - percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK) + tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK) << TEGRA186_WAKE_TIME_SHIFT; /* @@ -64,8 +66,8 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state, * from DRAM in that function, because the L2 cache is not flushed * unless the cluster is entering CC6/CC7. 
*/ - clean_dcache_range((uint64_t)&percpu_data[cpu], - sizeof(percpu_data[cpu])); + clean_dcache_range((uint64_t)&tegra_percpu_data[cpu], + sizeof(tegra_percpu_data[cpu])); /* Sanity check the requested state id */ switch (state_id) { @@ -80,18 +82,19 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state, default: ERROR("%s: unsupported state id (%d)\n", __func__, state_id); - return PSCI_E_INVALID_PARAMS; + ret = PSCI_E_INVALID_PARAMS; + break; } - return PSCI_E_SUCCESS; + return ret; } -int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) +int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) { const plat_local_state_t *pwr_domain_state; - unsigned int stateid_afflvl0, stateid_afflvl2; - int cpu = plat_my_core_pos(); - plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + uint8_t stateid_afflvl0, stateid_afflvl2; + uint32_t cpu = plat_my_core_pos(); + const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); mce_cstate_info_t cstate_info = { 0 }; uint64_t smmu_ctx_base; uint32_t val; @@ -109,8 +112,8 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) /* Enter CPU idle/powerdown */ val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ? TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7; - (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val, - percpu_data[cpu].wake_time, 0); + (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val, + tegra_percpu_data[cpu].wake_time, 0U); } else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { @@ -138,18 +141,20 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) cstate_info.system_state_force = 1; cstate_info.update_wake_mask = 1; mce_update_cstate_info(&cstate_info); - /* Loop until system suspend is allowed */ do { - val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, + val = (uint32_t)mce_command_handler( + (uint64_t)MCE_CMD_IS_SC7_ALLOWED, TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, - 0); - } while (val == 0); + 0U); + } while (val == 0U); /* Instruct the MCE to enter system suspend state */ - (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, - TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); + (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, + TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U); + } else { + ; /* do nothing */ } return PSCI_E_SUCCESS; @@ -159,23 +164,28 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) * Platform handler to calculate the proper target power level at the * specified affinity level ******************************************************************************/ -plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, +plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl, const plat_local_state_t *states, - unsigned int ncpu) + uint32_t ncpu) { plat_local_state_t target = *states; - int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1; - int core_pos = read_mpidr() & MPIDR_CPU_MASK; + uint32_t pos = 0; + plat_local_state_t result = PSCI_LOCAL_STATE_RUN; + uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu; + int32_t ret, cluster_powerdn = 1; + uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK; mce_cstate_info_t cstate_info = { 0 }; /* get the power state at this level */ - if (lvl == MPIDR_AFFLVL1) - target = *(states + core_pos); - if (lvl == MPIDR_AFFLVL2) - target = *(states + cpu); + if (lvl == (uint32_t)MPIDR_AFFLVL1) { + target = states[core_pos]; + } + if (lvl == (uint32_t)MPIDR_AFFLVL2) { + target = states[cpu]; 
+ } /* CPU suspend */ - if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) { + if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) { /* Program default wake mask */ cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK; @@ -183,25 +193,29 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, mce_update_cstate_info(&cstate_info); /* Check if CCx state is allowed. */ - ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED, - TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time, - 0); - if (ret) - return PSTATE_ID_CORE_POWERDN; + ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED, + TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time, + 0U); + if (ret != 0) { + result = PSTATE_ID_CORE_POWERDN; + } } /* CPU off */ - if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) { + if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) { /* find out the number of ON cpus in the cluster */ do { - target = *states++; - if (target != PLAT_MAX_OFF_STATE) + target = states[pos]; + if (target != PLAT_MAX_OFF_STATE) { cluster_powerdn = 0; - } while (--ncpu); + } + --num_cpu; + pos++; + } while (num_cpu != 0U); /* Enable cluster powerdn from last CPU in the cluster */ - if (cluster_powerdn) { + if (cluster_powerdn != 0) { /* Enable CC7 state and turn off wake mask */ cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7; @@ -209,12 +223,13 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, mce_update_cstate_info(&cstate_info); /* Check if CCx state is allowed. */ - ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED, + ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED, TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, - 0); - if (ret) - return PSTATE_ID_CORE_POWERDN; + 0U); + if (ret != 0) { + result = PSTATE_ID_CORE_POWERDN; + } } else { @@ -225,20 +240,21 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, } /* System Suspend */ - if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) && - (target == PSTATE_ID_SOC_POWERDN)) - return PSTATE_ID_SOC_POWERDN; + if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) && + (target == PSTATE_ID_SOC_POWERDN)) { + result = PSTATE_ID_SOC_POWERDN; + } /* default state */ - return PSCI_LOCAL_STATE_RUN; + return result; } -int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) +int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) { const plat_local_state_t *pwr_domain_state = target_state->pwr_domain_state; - plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); - unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & + const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & TEGRA186_STATE_ID_MASK; uint64_t val; @@ -250,7 +266,7 @@ int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) */ val = params_from_bl2->tzdram_base + ((uintptr_t)&__tegra186_cpu_reset_handler_end - - (uintptr_t)tegra186_cpu_reset_handler); + (uintptr_t)&tegra186_cpu_reset_handler); memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE); } @@ -258,30 +274,49 @@ int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) return PSCI_E_SUCCESS; } -int tegra_soc_pwr_domain_on(u_register_t mpidr) +int32_t tegra_soc_pwr_domain_on(u_register_t mpidr) { - uint32_t target_cpu = mpidr & MPIDR_CPU_MASK; + uint32_t target_cpu = mpidr & 
(uint64_t)MPIDR_CPU_MASK; uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >> - MPIDR_AFFINITY_BITS; + (uint64_t)MPIDR_AFFINITY_BITS; + int32_t ret = PSCI_E_SUCCESS; + + if (target_cluster > (uint64_t)MPIDR_AFFLVL1) { - if (target_cluster > MPIDR_AFFLVL1) { ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr); - return PSCI_E_NOT_PRESENT; - } + ret = PSCI_E_NOT_PRESENT; - /* construct the target CPU # */ - target_cpu |= (target_cluster << 2); + } else { + /* construct the target CPU # */ + target_cpu |= (target_cluster << 2); - mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0); + (void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U); + } - return PSCI_E_SUCCESS; + return ret; } -int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) +int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) { - int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; - int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0]; + uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; + uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0]; mce_cstate_info_t cstate_info = { 0 }; + uint64_t impl, val; + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); + + impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK; + + /* + * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186 + * A02p and beyond). + */ + if ((plat_params->l2_ecc_parity_prot_dis != 1) && + (impl != (uint64_t)DENVER_IMPL)) { + + val = read_l2ctlr_el1(); + val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT; + write_l2ctlr_el1(val); + } /* * Reset power state info for CPUs when onlining, we set @@ -328,65 +363,28 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) return PSCI_E_SUCCESS; } -int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) +int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) { - int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; + uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK; + + (void)target_state; /* Disable Denver's DCO operations */ - if (impl == DENVER_IMPL) + if (impl == DENVER_IMPL) { denver_disable_dco(); + } /* Turn off CPU */ - (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, - MCE_CORE_SLEEP_TIME_INFINITE, 0); + (void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, + MCE_CORE_SLEEP_TIME_INFINITE, 0U); return PSCI_E_SUCCESS; } __dead2 void tegra_soc_prepare_system_off(void) { - mce_cstate_info_t cstate_info = { 0 }; - uint32_t val; - - if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) { - - /* power off the entire system */ - mce_enter_ccplex_state(tegra186_system_powerdn_state); - - } else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) { - - /* Prepare for quasi power down */ - cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7; - cstate_info.system = TEGRA_ARI_SYSTEM_SC8; - cstate_info.system_state_force = 1; - cstate_info.update_wake_mask = 1; - mce_update_cstate_info(&cstate_info); - - /* loop until other CPUs power down */ - do { - val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, - TEGRA_ARI_CORE_C7, - MCE_CORE_SLEEP_TIME_INFINITE, - 0); - } while (val == 0); - - /* Enter quasi power down state */ - (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, - TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); - - /* disable GICC */ - tegra_gic_cpuif_deactivate(); - - 
/* power down core */ - prepare_cpu_pwr_dwn(); - - /* flush L1/L2 data caches */ - dcsw_op_all(DCCISW); - - } else { - ERROR("%s: unsupported power down state (%d)\n", __func__, - tegra186_system_powerdn_state); - } + /* power off the entire system */ + mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF); wfi(); @@ -396,7 +394,7 @@ __dead2 void tegra_soc_prepare_system_off(void) } } -int tegra_soc_prepare_system_reset(void) +int32_t tegra_soc_prepare_system_reset(void) { mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT); diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c index 4485e2733a034a2c1eeb94ecf7d7c325ef17e276..35a403bc7d0dbe80c87204ee749f4c9521b0ac95 100644 --- a/plat/nvidia/tegra/soc/t186/plat_secondary.c +++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c @@ -14,14 +14,13 @@ #include <tegra_def.h> #include <tegra_private.h> -#define MISCREG_CPU_RESET_VECTOR 0x2000 -#define MISCREG_AA64_RST_LOW 0x2004 -#define MISCREG_AA64_RST_HIGH 0x2008 +#define MISCREG_AA64_RST_LOW 0x2004U +#define MISCREG_AA64_RST_HIGH 0x2008U -#define SCRATCH_SECURE_RSV1_SCRATCH_0 0x658 -#define SCRATCH_SECURE_RSV1_SCRATCH_1 0x65C +#define SCRATCH_SECURE_RSV1_SCRATCH_0 0x658U +#define SCRATCH_SECURE_RSV1_SCRATCH_1 0x65CU -#define CPU_RESET_MODE_AA64 1 +#define CPU_RESET_MODE_AA64 1U extern void memcpy16(void *dest, const void *src, unsigned int length); @@ -34,7 +33,7 @@ extern uint64_t __tegra186_cpu_reset_handler_end; void plat_secondary_setup(void) { uint32_t addr_low, addr_high; - plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); uint64_t cpu_reset_handler_base; INFO("Setting up secondary CPU boot\n"); @@ -58,7 +57,7 @@ void plat_secondary_setup(void) } addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64; - addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff); + addr_high = (uint32_t)((cpu_reset_handler_base >> 32U) & 0x7ffU); /* write lower 32 bits first, then the upper 11 bits */ mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low); @@ -71,5 +70,5 @@ void plat_secondary_setup(void) addr_high); /* update reset vector address to the CCPLEX */ - mce_update_reset_vector(); + (void)mce_update_reset_vector(); } diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c index 15dbd163907db3f12c54baddbebb6afb55460b30..bbd19c1c86e6f556252af2b204ce69b903795ba3 100644 --- a/plat/nvidia/tegra/soc/t186/plat_setup.c +++ b/plat/nvidia/tegra/soc/t186/plat_setup.c @@ -27,15 +27,12 @@ #include <tegra_platform.h> #include <tegra_private.h> -DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1) -extern uint64_t tegra_enable_l2_ecc_parity_prot; - /******************************************************************************* * Tegra186 CPU numbers in cluster #0 ******************************************************************************* */ -#define TEGRA186_CLUSTER0_CORE2 2 -#define TEGRA186_CLUSTER0_CORE3 3 +#define TEGRA186_CLUSTER0_CORE2 2U +#define TEGRA186_CLUSTER0_CORE3 3U /******************************************************************************* * The Tegra power domain tree has a single system level power domain i.e. a @@ -43,7 +40,7 @@ extern uint64_t tegra_enable_l2_ecc_parity_prot; * the number of power domains at the highest power level. 
******************************************************************************* */ -const unsigned char tegra_power_domain_tree_desc[] = { +const uint8_t tegra_power_domain_tree_desc[] = { /* No of root nodes */ 1, /* No of clusters */ @@ -54,45 +51,53 @@ const unsigned char tegra_power_domain_tree_desc[] = { PLATFORM_MAX_CPUS_PER_CLUSTER }; +/******************************************************************************* + * This function returns the Tegra default topology tree information. + ******************************************************************************/ +const uint8_t *plat_get_power_domain_tree_desc(void) +{ + return tegra_power_domain_tree_desc; +} + /* * Table of regions to map using the MMU. */ static const mmap_region_t tegra_mmap[] = { - MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000, /* 128KB */ + MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000U, /* 128KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB - UART A, B*/ + MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000U, /* 128KB - UART A, B*/ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000, /* 128KB - UART C, G */ + MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000U, /* 128KB - UART C, G */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000, /* 192KB - UART D, E, F */ + MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000U, /* 192KB - UART D, E, F */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */ + MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000U, /* 128KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000, /* 256KB */ + MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000U, /* 256KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */ + MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000U, /* 384KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000, /* 128KB - ARM/Denver */ + MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000U, /* 128KB - ARM/Denver */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000, /* 64KB */ + MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000U, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), {0} }; @@ -109,7 +114,7 @@ const 
mmap_region_t *plat_get_mmio_map(void) /******************************************************************************* * Handler to get the System Counter Frequency ******************************************************************************/ -unsigned int plat_get_syscnt_freq2(void) +uint32_t plat_get_syscnt_freq2(void) { return 31250000; } @@ -136,56 +141,42 @@ static uint32_t tegra186_uart_addresses[TEGRA186_MAX_UART_PORTS + 1] = { /******************************************************************************* * Retrieve the UART controller base to be used as the console ******************************************************************************/ -uint32_t plat_get_console_from_id(int id) +uint32_t plat_get_console_from_id(int32_t id) { - if (id > TEGRA186_MAX_UART_PORTS) - return 0; + uint32_t ret; - return tegra186_uart_addresses[id]; -} + if (id > TEGRA186_MAX_UART_PORTS) { + ret = 0; + } else { + ret = tegra186_uart_addresses[id]; + } -/* represent chip-version as concatenation of major (15:12), minor (11:8) and subrev (7:0) */ -#define TEGRA186_VER_A02P 0x1201 + return ret; +} /******************************************************************************* * Handler for early platform setup ******************************************************************************/ void plat_early_platform_setup(void) { - int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; - uint32_t chip_subrev, val; + uint64_t impl, val; + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); /* sanity check MCE firmware compatibility */ mce_verify_firmware_version(); + impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK; + /* - * Enable ECC and Parity Protection for Cortex-A57 CPUs - * for Tegra A02p SKUs + * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186 + * A02p and beyond). */ - if (impl != DENVER_IMPL) { - - /* get the major, minor and sub-version values */ - chip_subrev = mmio_read_32(TEGRA_FUSE_BASE + OPT_SUBREVISION) & - SUBREVISION_MASK; - - /* prepare chip version number */ - val = (tegra_get_chipid_major() << 12) | - (tegra_get_chipid_minor() << 8) | - chip_subrev; + if ((plat_params->l2_ecc_parity_prot_dis != 1) && + (impl != (uint64_t)DENVER_IMPL)) { - /* enable L2 ECC for Tegra186 A02P and beyond */ - if (val >= TEGRA186_VER_A02P) { - - val = read_l2ctlr_el1(); - val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT; - write_l2ctlr_el1(val); - - /* - * Set the flag to enable ECC/Parity Protection - * when we exit System Suspend or Cluster Powerdn - */ - tegra_enable_l2_ecc_parity_prot = 1; - } + val = read_l2ctlr_el1(); + val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT; + write_l2ctlr_el1(val); } } @@ -208,8 +199,9 @@ void plat_gic_setup(void) * Initialize the FIQ handler only if the platform supports any * FIQ interrupt sources. */ - if (sizeof(tegra186_interrupt_props) > 0) + if (sizeof(tegra186_interrupt_props) > 0U) { tegra_fiq_handler_setup(); + } } /******************************************************************************* @@ -242,33 +234,34 @@ plat_params_from_bl2_t *plat_get_bl31_plat_params(void) * to convert an MPIDR to a unique linear index. An error code (-1) is returned * in case the MPIDR is invalid. 
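 * For example, an MPIDR with cluster 1/CPU 2 maps to linear position 6, whereas
 * cluster 0 cores 2 and 3 do not exist on this platform and return PSCI_E_NOT_PRESENT.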
******************************************************************************/ -int plat_core_pos_by_mpidr(u_register_t mpidr) +int32_t plat_core_pos_by_mpidr(u_register_t mpidr) { - unsigned int cluster_id, cpu_id, pos; + u_register_t cluster_id, cpu_id, pos; + int32_t ret; - cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; - cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; + cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK; + cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK; /* * Validate cluster_id by checking whether it represents * one of the two clusters present on the platform. - */ - if (cluster_id >= PLATFORM_CLUSTER_COUNT) - return PSCI_E_NOT_PRESENT; - - /* * Validate cpu_id by checking whether it represents a CPU in * one of the two clusters present on the platform. */ - if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) - return PSCI_E_NOT_PRESENT; - - /* calculate the core position */ - pos = cpu_id + (cluster_id << 2); - - /* check for non-existent CPUs */ - if (pos == TEGRA186_CLUSTER0_CORE2 || pos == TEGRA186_CLUSTER0_CORE3) - return PSCI_E_NOT_PRESENT; + if ((cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) || + (cpu_id >= (u_register_t)PLATFORM_MAX_CPUS_PER_CLUSTER)) { + ret = PSCI_E_NOT_PRESENT; + } else { + /* calculate the core position */ + pos = cpu_id + (cluster_id << 2U); + + /* check for non-existent CPUs */ + if ((pos == TEGRA186_CLUSTER0_CORE2) || (pos == TEGRA186_CLUSTER0_CORE3)) { + ret = PSCI_E_NOT_PRESENT; + } else { + ret = (int32_t)pos; + } + } - return pos; + return ret; } diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c index bf98fcf648b6c3599513bfa47406379b1b33b43a..955029e234ec7bc2a68ce9f1bf3064f167e8e4bc 100644 --- a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c +++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c @@ -20,8 +20,6 @@ #include <t18x_ari.h> #include <tegra_private.h> -extern uint32_t tegra186_system_powerdn_state; - /******************************************************************************* * Offset to read the ref_clk counter value ******************************************************************************/ @@ -30,7 +28,6 @@ extern uint32_t tegra186_system_powerdn_state; /******************************************************************************* * Tegra186 SiP SMCs ******************************************************************************/ -#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE 0xC2FFFE01 #define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS 0xC2FFFE02 #define TEGRA_SIP_MCE_CMD_ENTER_CSTATE 0xC2FFFF00 #define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO 0xC2FFFF01 @@ -60,7 +57,7 @@ int plat_sip_handler(uint32_t smc_fid, uint64_t x2, uint64_t x3, uint64_t x4, - void *cookie, + const void *cookie, void *handle, uint64_t flags) { @@ -115,33 +112,6 @@ int plat_sip_handler(uint32_t smc_fid, return 0; - case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE: - - /* clean up the high bits */ - x1 = (uint32_t)x1; - - /* - * SC8 is a special Tegra186 system state where the CPUs and - * DRAM are powered down but the other subsystem is still - * alive. 
- */ - if ((x1 == TEGRA_ARI_SYSTEM_SC8) || - (x1 == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF)) { - - tegra186_system_powerdn_state = x1; - flush_dcache_range( - (uintptr_t)&tegra186_system_powerdn_state, - sizeof(tegra186_system_powerdn_state)); - - } else { - - ERROR("%s: unhandled powerdn state (%d)\n", __func__, - (uint32_t)x1); - return -ENOTSUP; - } - - return 0; - /* * This function ID reads the Activity monitor's core/ref clock * counter values for a core/cluster. diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h new file mode 100644 index 0000000000000000000000000000000000000000..01577477e963747210b4f2e4c53a171d68c7d7b2 --- /dev/null +++ b/plat/nvidia/tegra/soc/t210/drivers/se/se_private.h @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef SE_PRIVATE_H +#define SE_PRIVATE_H + +#include <stdbool.h> +#include <security_engine.h> + +/* + * PMC registers + */ + +/* Secure scratch registers */ +#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U +#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U +#define PMC_SECURE_SCRATCH6_OFFSET 0x224U +#define PMC_SECURE_SCRATCH7_OFFSET 0x228U +#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U +#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU +#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U +#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U + +/* + * AHB arbitration memory write queue + */ +#define ARAHB_MEM_WRQUE_MST_ID_OFFSET 0xFCU +#define ARAHB_MST_ID_SE2_MASK (0x1U << 13) +#define ARAHB_MST_ID_SE_MASK (0x1U << 14) + +/* SE Status register */ +#define SE_STATUS_OFFSET 0x800U +#define SE_STATUS_SHIFT 0 +#define SE_STATUS_IDLE \ + ((0U) << SE_STATUS_SHIFT) +#define SE_STATUS_BUSY \ + ((1U) << SE_STATUS_SHIFT) +#define SE_STATUS(x) \ + ((x) & ((0x3U) << SE_STATUS_SHIFT)) + +/* SE config register */ +#define SE_CONFIG_REG_OFFSET 0x14U +#define SE_CONFIG_ENC_ALG_SHIFT 12 +#define SE_CONFIG_ENC_ALG_AES_ENC \ + ((1U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_RNG \ + ((2U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_SHA \ + ((3U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_RSA \ + ((4U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG_NOP \ + ((0U) << SE_CONFIG_ENC_ALG_SHIFT) +#define SE_CONFIG_ENC_ALG(x) \ + ((x) & ((0xFU) << SE_CONFIG_ENC_ALG_SHIFT)) + +#define SE_CONFIG_DEC_ALG_SHIFT 8 +#define SE_CONFIG_DEC_ALG_AES \ + ((1U) << SE_CONFIG_DEC_ALG_SHIFT) +#define SE_CONFIG_DEC_ALG_NOP \ + ((0U) << SE_CONFIG_DEC_ALG_SHIFT) +#define SE_CONFIG_DEC_ALG(x) \ + ((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT)) + +#define SE_CONFIG_DST_SHIFT 2 +#define SE_CONFIG_DST_MEMORY \ + ((0U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_HASHREG \ + ((1U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_KEYTAB \ + ((2U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_SRK \ + ((3U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST_RSAREG \ + ((4U) << SE_CONFIG_DST_SHIFT) +#define SE_CONFIG_DST(x) \ + ((x) & ((0x7U) << SE_CONFIG_DST_SHIFT)) + +/* DRBG random number generator config */ +#define SE_RNG_CONFIG_REG_OFFSET 0x340 + +#define DRBG_MODE_SHIFT 0 +#define DRBG_MODE_NORMAL \ + ((0UL) << DRBG_MODE_SHIFT) +#define DRBG_MODE_FORCE_INSTANTION \ + ((1UL) << DRBG_MODE_SHIFT) +#define DRBG_MODE_FORCE_RESEED \ + ((2UL) << DRBG_MODE_SHIFT) +#define SE_RNG_CONFIG_MODE(x) \ + ((x) & ((0x3UL) << DRBG_MODE_SHIFT)) + +#define DRBG_SRC_SHIFT 2 +#define 
DRBG_SRC_NONE \ + ((0UL) << DRBG_SRC_SHIFT) +#define DRBG_SRC_ENTROPY \ + ((1UL) << DRBG_SRC_SHIFT) +#define DRBG_SRC_LFSR \ + ((2UL) << DRBG_SRC_SHIFT) +#define SE_RNG_SRC_CONFIG_MODE(x) \ + ((x) & ((0x3UL) << DRBG_SRC_SHIFT)) + +/* DRBG random number generator entropy config */ +#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U + +#define DRBG_RO_ENT_SRC_SHIFT 1 +#define DRBG_RO_ENT_SRC_ENABLE \ + ((1U) << DRBG_RO_ENT_SRC_SHIFT) +#define DRBG_RO_ENT_SRC_DISABLE \ + ((0U) << DRBG_RO_ENT_SRC_SHIFT) +#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \ + ((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT)) + +#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0 +#define DRBG_RO_ENT_SRC_LOCK_ENABLE \ + ((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT) +#define DRBG_RO_ENT_SRC_LOCK_DISABLE \ + ((0U) << DRBG_RO_ENT_SRC_LOCK_SHIFT) +#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x) \ + ((x) & ((0x1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)) + +#define DRBG_RO_ENT_IGNORE_MEM_SHIFT 12 +#define DRBG_RO_ENT_IGNORE_MEM_ENABLE \ + ((1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT) +#define DRBG_RO_ENT_IGNORE_MEM_DISABLE \ + ((0U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT) +#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \ + ((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)) + +/* SE OPERATION */ +#define SE_OPERATION_REG_OFFSET 0x8U +#define SE_OPERATION_SHIFT 0 +#define SE_OP_ABORT \ + ((0x0U) << SE_OPERATION_SHIFT) +#define SE_OP_START \ + ((0x1U) << SE_OPERATION_SHIFT) +#define SE_OP_RESTART \ + ((0x2U) << SE_OPERATION_SHIFT) +#define SE_OP_CTX_SAVE \ + ((0x3U) << SE_OPERATION_SHIFT) +#define SE_OP_RESTART_IN \ + ((0x4U) << SE_OPERATION_SHIFT) +#define SE_OPERATION(x) \ + ((x) & ((0x7U) << SE_OPERATION_SHIFT)) + +/* SE_CTX_SAVE_AUTO */ +#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U + +/* Enable */ +#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0 +#define SE_CTX_SAVE_AUTO_DIS \ + ((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT) +#define SE_CTX_SAVE_AUTO_EN \ + ((1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT) +#define SE_CTX_SAVE_AUTO_ENABLE(x) \ + ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)) + +/* Lock */ +#define SE_CTX_SAVE_AUTO_LOCK_SHIFT 8 +#define SE_CTX_SAVE_AUTO_LOCK_EN \ + ((1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT) +#define SE_CTX_SAVE_AUTO_LOCK_DIS \ + ((0U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT) +#define SE_CTX_SAVE_AUTO_LOCK(x) \ + ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)) + +/* Current context save number of blocks */ +#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16 +#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU +#define SE_CTX_SAVE_GET_BLK_COUNT(x) \ + (((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \ + SE_CTX_SAVE_AUTO_CURR_CNT_MASK) + +#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133 +#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646 + +/* SE TZRAM OPERATION - only for SE1 */ +#define SE_TZRAM_OPERATION 0x540U + +#define SE_TZRAM_OP_MODE_SHIFT 1 +#define SE_TZRAM_OP_MODE_SAVE \ + ((0U) << SE_TZRAM_OP_MODE_SHIFT) +#define SE_TZRAM_OP_MODE_RESTORE \ + ((1U) << SE_TZRAM_OP_MODE_SHIFT) +#define SE_TZRAM_OP_MODE(x) \ + ((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT)) + +#define SE_TZRAM_OP_BUSY_SHIFT 2 +#define SE_TZRAM_OP_BUSY_OFF \ + ((0U) << SE_TZRAM_OP_BUSY_SHIFT) +#define SE_TZRAM_OP_BUSY_ON \ + ((1U) << SE_TZRAM_OP_BUSY_SHIFT) +#define SE_TZRAM_OP_BUSY(x) \ + ((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT)) + +#define SE_TZRAM_OP_REQ_SHIFT 0 +#define SE_TZRAM_OP_REQ_IDLE \ + ((0U) << SE_TZRAM_OP_REQ_SHIFT) +#define SE_TZRAM_OP_REQ_INIT \ + ((1U) << SE_TZRAM_OP_REQ_SHIFT) +#define SE_TZRAM_OP_REQ(x) \ + ((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT)) + +/* SE Interrupt */ +#define SE_INT_STATUS_REG_OFFSET 0x10U +#define SE_INT_OP_DONE_SHIFT 4 +#define 
SE_INT_OP_DONE_CLEAR \ + ((0U) << SE_INT_OP_DONE_SHIFT) +#define SE_INT_OP_DONE_ACTIVE \ + ((1U) << SE_INT_OP_DONE_SHIFT) +#define SE_INT_OP_DONE(x) \ + ((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT)) + +/* SE error status */ +#define SE_ERR_STATUS_REG_OFFSET 0x804U + +/* SE linked list (LL) register */ +#define SE_IN_LL_ADDR_REG_OFFSET 0x18U +#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U +#define SE_BLOCK_COUNT_REG_OFFSET 0x318U + +/* AES data sizes */ +#define TEGRA_SE_AES_BLOCK_SIZE 16 +#define TEGRA_SE_AES_MIN_KEY_SIZE 16 +#define TEGRA_SE_AES_MAX_KEY_SIZE 32 +#define TEGRA_SE_AES_IV_SIZE 16 + +/******************************************************************************* + * Inline functions definition + ******************************************************************************/ + +static inline uint32_t tegra_se_read_32(const tegra_se_dev_t *dev, uint32_t offset) +{ + return mmio_read_32(dev->se_base + offset); +} + +static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, uint32_t val) +{ + mmio_write_32(dev->se_base + offset, val); +} + +/******************************************************************************* + * Prototypes + ******************************************************************************/ + +#endif /* SE_PRIVATE_H */ diff --git a/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c new file mode 100644 index 0000000000000000000000000000000000000000..fa99db62084b757ffe895f6687b9fb269a93b3c5 --- /dev/null +++ b/plat/nvidia/tegra/soc/t210/drivers/se/security_engine.c @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <arch_helpers.h> +#include <assert.h> +#include <common/debug.h> +#include <delay_timer.h> +#include <errno.h> +#include <mmio.h> +#include <psci.h> +#include <se_private.h> +#include <security_engine.h> +#include <tegra_platform.h> + +/******************************************************************************* + * Constants and Macros + ******************************************************************************/ + +#define TIMEOUT_100MS 100UL // Timeout in 100ms + +/******************************************************************************* + * Data structure and global variables + ******************************************************************************/ + +/* The security engine contexts are formatted as follows: + * + * SE1 CONTEXT: + * #--------------------------------# + * | Random Data 1 Block | + * #--------------------------------# + * | Sticky Bits 2 Blocks | + * #--------------------------------# + * | Key Table 64 Blocks | + * | For each Key (x16): | + * | Key: 2 Blocks | + * | Original-IV: 1 Block | + * | Updated-IV: 1 Block | + * #--------------------------------# + * | RSA Keys 64 Blocks | + * #--------------------------------# + * | Known Pattern 1 Block | + * #--------------------------------# + * + * SE2/PKA1 CONTEXT: + * #--------------------------------# + * | Random Data 1 Block | + * #--------------------------------# + * | Sticky Bits 2 Blocks | + * #--------------------------------# + * | Key Table 64 Blocks | + * | For each Key (x16): | + * | Key: 2 Blocks | + * | Original-IV: 1 Block | + * | Updated-IV: 1 Block | + * #--------------------------------# + * | RSA Keys 64 Blocks | + * #--------------------------------# + * | PKA sticky bits 1 Block | + * 
#--------------------------------# + * | PKA keys 512 Blocks | + * #--------------------------------# + * | Known Pattern 1 Block | + * #--------------------------------# + */ + +/* SE input and output linked list buffers */ +static tegra_se_io_lst_t se1_src_ll_buf; +static tegra_se_io_lst_t se1_dst_ll_buf; + +/* SE2 input and output linked list buffers */ +static tegra_se_io_lst_t se2_src_ll_buf; +static tegra_se_io_lst_t se2_dst_ll_buf; + +/* SE1 security engine device handle */ +static tegra_se_dev_t se_dev_1 = { + .se_num = 1, + /* setup base address for se */ + .se_base = TEGRA_SE1_BASE, + /* Setup context size in AES blocks */ + .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1, + /* Setup SRC buffers for SE operations */ + .src_ll_buf = &se1_src_ll_buf, + /* Setup DST buffers for SE operations */ + .dst_ll_buf = &se1_dst_ll_buf, +}; + +/* SE2 security engine device handle */ +static tegra_se_dev_t se_dev_2 = { + .se_num = 2, + /* setup base address for se */ + .se_base = TEGRA_SE2_BASE, + /* Setup context size in AES blocks */ + .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2, + /* Setup SRC buffers for SE operations */ + .src_ll_buf = &se2_src_ll_buf, + /* Setup DST buffers for SE operations */ + .dst_ll_buf = &se2_dst_ll_buf, +}; + +/******************************************************************************* + * Functions Definition + ******************************************************************************/ + +static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev) +{ + flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)), + sizeof(tegra_se_io_lst_t)); + flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)), + sizeof(tegra_se_io_lst_t)); +} + +/* + * Check that SE operation has completed after kickoff + * This function is invoked after an SE operation has been started, + * and it checks the following conditions: + * 1. SE_INT_STATUS = SE_OP_DONE + * 2. SE_STATUS = IDLE + * 3. AHB bus data transfer complete. + * 4. SE_ERR_STATUS is clean. 
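+ * Each condition is polled at 1ms intervals for up to TIMEOUT_100MS
+ * iterations; a timeout on any check returns -ETIMEDOUT, and a non-zero
+ * SE_ERR_STATUS value is reported as -ENOTSUP.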
+ */ +static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev) +{ + uint32_t val = 0; + int32_t ret = 0; + uint32_t timeout; + + /* Poll the SE interrupt register to ensure H/W operation complete */ + val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET); + for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) && + (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: Atomic context save operation timeout!\n", + __func__); + ret = -ETIMEDOUT; + } + + /* Poll the SE status idle to ensure H/W operation complete */ + if (ret == 0) { + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); + timeout++) { + mdelay(1); + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: MEM_INTERFACE and SE state " + "idle state timeout.\n", __func__); + ret = -ETIMEDOUT; + } + } + + /* Check AHB bus transfer complete */ + if (ret == 0) { + val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET); + for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) && + (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: SE write over AHB timeout.\n", __func__); + ret = -ETIMEDOUT; + } + } + + /* Ensure that no errors are thrown during operation */ + if (ret == 0) { + val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET); + if (val != 0U) { + ERROR("%s: error during SE operation! 0x%x", __func__, val); + ret = -ENOTSUP; + } + } + + return ret; +} + +/* + * Verify the SE context save auto has been enabled. + * SE_CTX_SAVE_AUTO.ENABLE == ENABLE + * If the SE context save auto is not enabled, then set + * the context save auto enable and lock the setting. + * If the SE context save auto is not enabled and the + * enable setting is locked, then return an error. + */ +static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev) +{ + uint32_t val; + int32_t ret = 0; + + val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET); + if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) { + if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) { + ERROR("%s: ERR: Cannot enable atomic. Write locked!\n", + __func__); + ret = -EACCES; + } + + /* Program SE_CTX_SAVE_AUTO */ + if (ret == 0) { + tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET, + SE_CTX_SAVE_AUTO_LOCK_EN | + SE_CTX_SAVE_AUTO_EN); + } + } + + return ret; +} + +/* + * Wait for SE engine to be idle and clear pending interrupts before + * starting the next SE operation. + */ +static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev) +{ + int32_t ret = 0; + uint32_t val = 0; + uint32_t timeout; + + /* Wait for previous operation to finish */ + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) { + mdelay(1); + val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET); + } + + if (timeout == TIMEOUT_100MS) { + ERROR("%s: ERR: SE status is not idle!\n", __func__); + ret = -ETIMEDOUT; + } + + /* Clear any pending interrupts from previous operation */ + val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET); + tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val); + return ret; +} + +/* + * SE atomic context save. 
At SC7 entry, the SE driver triggers the
+ * hardware to automatically perform the context save operation.
+ */
+static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
+{
+	int32_t ret = 0;
+	uint32_t val = 0;
+	uint32_t blk_count_limit = 0;
+	uint32_t block_count;
+
+	/* Check that previous operation is finalized */
+	ret = tegra_se_operation_prepare(se_dev);
+
+	/* Ensure HW atomic context save has been enabled
+	 * This should have been done at boot time.
+	 * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
+	 */
+	if (ret == 0) {
+		ret = tegra_se_ctx_save_auto_enable(se_dev);
+	}
+
+	/* Read the context save progress counter: block_count
+	 * Ensure no previous context save has been triggered
+	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
+	 */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
+		if (block_count != 0U) {
+			ERROR("%s: ctx_save triggered multiple times\n",
+				__func__);
+			ret = -EALREADY;
+		}
+	}
+
+	/* Set the destination block count for when the context save completes */
+	if (ret == 0) {
+		blk_count_limit = block_count + se_dev->ctx_size_blks;
+	}
+
+	/* Program SE_CONFIG register as for RNG operation
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_MODE is ignored
+	 * SE_CONFIG.DEC_MODE is ignored
+	 * SE_CONFIG.DST = MEMORY
+	 */
+	if (ret == 0) {
+		val = (SE_CONFIG_ENC_ALG_RNG |
+			SE_CONFIG_DEC_ALG_NOP |
+			SE_CONFIG_DST_MEMORY);
+		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+		tegra_se_make_data_coherent(se_dev);
+
+		/* SE_CTX_SAVE operation */
+		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
+			SE_OP_CTX_SAVE);
+
+		ret = tegra_se_operation_complete(se_dev);
+	}
+
+	/* Check that the context save has written the correct number of blocks */
+	if (ret == 0) {
+		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
+		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
+			ERROR("%s: expected %d blocks but %d were written\n",
+				__func__, blk_count_limit, val);
+			ret = -ECANCELED;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Security engine primitive operations, including normal operation
+ * and the context save operation.
+ */
+static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
+{
+	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
+	int ret = 0;
+
+	assert(se_dev);
+
+	/* Use device buffers for in and out */
+	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
+	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
+
+	/* Check that previous operation is finalized */
+	ret = tegra_se_operation_prepare(se_dev);
+	if (ret != 0) {
+		goto op_error;
+	}
+
+	/* Program SE operation size */
+	if (nblocks) {
+		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
+	}
+
+	/* Make SE LL data coherent before the SE operation */
+	tegra_se_make_data_coherent(se_dev);
+
+	/* Start hardware operation */
+	tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
+
+	/* Wait for operation to finish */
+	ret = tegra_se_operation_complete(se_dev);
+
+op_error:
+	return ret;
+}
+
+/*
+ * Security Engine sequence to generate SRK
+ * SE1 and SE2 will generate different SRKs from different
+ * entropy seeds.
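+ * The DRBG is forced to reseed from the hardware entropy source and the
+ * result is written to the SRK register (SE_CONFIG.DST = SRK) rather than
+ * to memory, so the operation runs with a zero-byte payload.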
+ */
+static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
+{
+	int ret = PSCI_E_INTERN_FAIL;
+	uint32_t val;
+
+	/* Configure the following hardware register settings:
+	 * SE_CONFIG.DEC_ALG = NOP
+	 * SE_CONFIG.ENC_ALG = RNG
+	 * SE_CONFIG.DST = SRK
+	 * SE_OPERATION.OP = START
+	 * SE_CRYPTO_LAST_BLOCK = 0
+	 */
+	se_dev->src_ll_buf->last_buff_num = 0;
+	se_dev->dst_ll_buf->last_buff_num = 0;
+
+	/* Configure random number generator */
+	val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
+	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
+
+	/* Configure output destination = SRK */
+	val = (SE_CONFIG_ENC_ALG_RNG |
+		SE_CONFIG_DEC_ALG_NOP |
+		SE_CONFIG_DST_SRK);
+	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
+
+	/* Perform hardware operation */
+	ret = tegra_se_perform_operation(se_dev, 0);
+
+	return ret;
+}
+
+/*
+ * Initialize the SE engine handles
+ */
+void tegra_se_init(void)
+{
+	INFO("%s: start SE init\n", __func__);
+
+	/* Generate random SRK to initialize DRBG */
+	tegra_se_generate_srk(&se_dev_1);
+	tegra_se_generate_srk(&se_dev_2);
+
+	INFO("%s: SE init done\n", __func__);
+}
+
+/*
+ * Security engine power suspend entry point.
+ * This function is invoked from the PSCI power domain suspend handler.
+ */
+int32_t tegra_se_suspend(void)
+{
+	int32_t ret = 0;
+
+	/* Atomic context save for SE2 and PKA1 */
+	INFO("%s: SE2/PKA1 atomic context save\n", __func__);
+	ret = tegra_se_context_save_atomic(&se_dev_2);
+
+	/* Atomic context save for SE1 */
+	if (ret == 0) {
+		INFO("%s: SE1 atomic context save\n", __func__);
+		ret = tegra_se_context_save_atomic(&se_dev_1);
+	}
+
+	if (ret == 0) {
+		INFO("%s: SE atomic context save done\n", __func__);
+	}
+
+	return ret;
+}
+
+/*
+ * Save TZRAM to shadow TZRAM in AON
+ */
+int32_t tegra_se_save_tzram(void)
+{
+	uint32_t val = 0;
+	int32_t ret = 0;
+	uint32_t timeout;
+
+	INFO("%s: SE TZRAM save start\n", __func__);
+
+	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
+	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
+
+	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
+	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
+			(timeout < TIMEOUT_100MS); timeout++) {
+		mdelay(1);
+		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
+	}
+
+	if (timeout == TIMEOUT_100MS) {
+		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
+		ret = -ETIMEDOUT;
+	}
+
+	if (ret == 0) {
+		INFO("%s: SE TZRAM save done!\n", __func__);
+	}
+
+	return ret;
+}
+
+/*
+ * This function is invoked during SE resume
+ */
+static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
+{
+	uint32_t val;
+
+	assert(se_dev);
+
+	/* Lock RNG source to ENTROPY on resume */
+	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
+		DRBG_RO_ENT_SRC_LOCK_ENABLE |
+		DRBG_RO_ENT_SRC_ENABLE;
+	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
+
+	/* Enable and lock the SE atomic context save setting */
+	if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
+		ERROR("%s: ERR: enable SE%d context save auto failed!\n",
+			__func__, se_dev->se_num);
+	}
+
+	/* Set a random value to SRK to initialize DRBG */
+	tegra_se_generate_srk(se_dev);
+}
+
+/*
+ * This function is invoked on SC7 resume
+ */
+void tegra_se_resume(void)
+{
+	tegra_se_warm_boot_resume(&se_dev_1);
+	tegra_se_warm_boot_resume(&se_dev_2);
+}
diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
index 27786d3ca66c5415c92b30c8ce5331b14781a5e7..ed30ff404e952d4318c91f7a8f0ea611b1a792c4 100644
--- 
a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c +++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c @@ -1,13 +1,11 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <assert.h> - -#include <platform_def.h> - +#include <cortex_a57.h> #include <arch_helpers.h> #include <common/debug.h> #include <drivers/delay_timer.h> @@ -15,10 +13,14 @@ #include <lib/psci/psci.h> #include <plat/common/platform.h> +#include <bpmp.h> #include <flowctrl.h> #include <pmc.h> +#include <platform_def.h> +#include <security_engine.h> #include <tegra_def.h> #include <tegra_private.h> +#include <tegra_platform.h> /* * Register used to clear CPU reset signals. Each CPU has two reset @@ -55,7 +57,7 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state, * Cluster powerdown/idle request only for afflvl 1 */ req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id; - req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id; + req_state->pwr_domain_state[MPIDR_AFFLVL0] = PSTATE_ID_CORE_POWERDN; break; @@ -87,9 +89,11 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, const plat_local_state_t *states, unsigned int ncpu) { - plat_local_state_t target = *states; + plat_local_state_t target = PSCI_LOCAL_STATE_RUN; int cpu = plat_my_core_pos(); int core_pos = read_mpidr() & MPIDR_CPU_MASK; + uint32_t bpmp_reply, data[3]; + int ret; /* get the power state at this level */ if (lvl == MPIDR_AFFLVL1) @@ -97,19 +101,57 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, if (lvl == MPIDR_AFFLVL2) target = *(states + cpu); - /* Cluster idle/power-down */ - if ((lvl == MPIDR_AFFLVL1) && ((target == PSTATE_ID_CLUSTER_IDLE) || - (target == PSTATE_ID_CLUSTER_POWERDN))) { - return target; - } + if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_IDLE)) { + + /* initialize the bpmp interface */ + (void)tegra_bpmp_init(); + + /* Cluster idle */ + data[0] = (uint32_t)cpu; + data[1] = TEGRA_PM_CC6; + data[2] = TEGRA_PM_SC1; + ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE, + (void *)&data, (int)sizeof(data), + (void *)&bpmp_reply, (int)sizeof(bpmp_reply)); + + /* check if cluster idle entry is allowed */ + if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) { + + /* Cluster idle not allowed */ + target = PSCI_LOCAL_STATE_RUN; + } + + } else if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_POWERDN)) { + + /* initialize the bpmp interface */ + (void)tegra_bpmp_init(); + + /* Cluster power-down */ + data[0] = (uint32_t)cpu; + data[1] = TEGRA_PM_CC7; + data[2] = TEGRA_PM_SC1; + ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE, + (void *)&data, (int)sizeof(data), + (void *)&bpmp_reply, (int)sizeof(bpmp_reply)); + + /* check if cluster power down is allowed */ + if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) { + + /* Cluster power down not allowed */ + target = PSCI_LOCAL_STATE_RUN; + } - /* System Suspend */ - if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) && - (target == PSTATE_ID_SOC_POWERDN)) - return PSTATE_ID_SOC_POWERDN; + } else if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) && + (target == PSTATE_ID_SOC_POWERDN)) { - /* default state */ - return PSCI_LOCAL_STATE_RUN; + /* System Suspend */ + target = PSTATE_ID_SOC_POWERDN; + + } else { + ; /* do nothing */ + } + + return target; } int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) @@ -120,27 +162,43 @@ int 
tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2]; unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1]; unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0]; + int ret = PSCI_E_SUCCESS; if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) || - (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN)); + (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN)); assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) || - (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN)); + (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN)); + + if (tegra_chipid_is_t210_b01()) { + + /* Suspend se/se2 and pka1 */ + if (tegra_se_suspend() != 0) { + ret = PSCI_E_INTERN_FAIL; + } + + /* Save tzram contents */ + if (tegra_se_save_tzram() != 0) { + ret = PSCI_E_INTERN_FAIL; + } + } - /* suspend the entire soc */ - tegra_fc_soc_powerdn(mpidr); + /* enter system suspend */ + if (ret == PSCI_E_SUCCESS) { + tegra_fc_soc_powerdn(mpidr); + } } else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) { - assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_IDLE); + assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN); /* Prepare for cluster idle */ tegra_fc_cluster_idle(mpidr); } else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_POWERDN) { - assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_POWERDN); + assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN); /* Prepare for cluster powerdn */ tegra_fc_cluster_powerdn(mpidr); @@ -151,23 +209,40 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) tegra_fc_cpu_powerdn(mpidr); } else { - ERROR("%s: Unknown state id\n", __func__); - return PSCI_E_NOT_SUPPORTED; + ERROR("%s: Unknown state id (%d, %d, %d)\n", __func__, + stateid_afflvl2, stateid_afflvl1, stateid_afflvl0); + ret = PSCI_E_NOT_SUPPORTED; } - return PSCI_E_SUCCESS; + return ret; } int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) { + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); uint32_t val; + /* platform parameter passed by the previous bootloader */ + if (plat_params->l2_ecc_parity_prot_dis != 1) { + /* Enable ECC Parity Protection for Cortex-A57 CPUs */ + val = read_l2ctlr_el1(); + val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT; + write_l2ctlr_el1(val); + } + /* * Check if we are exiting from SOC_POWERDN. */ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] == PLAT_SYS_SUSPEND_STATE_ID) { + /* + * Security engine resume + */ + if (tegra_chipid_is_t210_b01()) { + tegra_se_resume(); + } + /* * Lock scratch registers which hold the CPU vectors */ @@ -231,7 +306,7 @@ int tegra_soc_prepare_system_reset(void) * for the PMC APB clock would not be changed due to system reset. */ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY, - SCLK_BURST_POLICY_DEFAULT); + SCLK_BURST_POLICY_DEFAULT); mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0); /* Wait 1 ms to make sure clock source/device logic is stabilized. */ diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c index c7f7165c0a7fdcdc29ef4cfb66f31ee1da25c818..6246dde90f98e6dac708fecd6e36909aec9014c4 100644 --- a/plat/nvidia/tegra/soc/t210/plat_setup.c +++ b/plat/nvidia/tegra/soc/t210/plat_setup.c @@ -1,34 +1,21 @@ /* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ #include <arch_helpers.h> +#include <bpmp.h> +#include <cortex_a57.h> #include <common/bl_common.h> #include <drivers/console.h> #include <lib/xlat_tables/xlat_tables_v2.h> - +#include <platform.h> +#include <security_engine.h> #include <tegra_def.h> +#include <tegra_platform.h> #include <tegra_private.h> -/******************************************************************************* - * The Tegra power domain tree has a single system level power domain i.e. a - * single root node. The first entry in the power domain descriptor specifies - * the number of power domains at the highest power level. - ******************************************************************************* - */ -const unsigned char tegra_power_domain_tree_desc[] = { - /* No of root nodes */ - 1, - /* No of clusters */ - PLATFORM_CLUSTER_COUNT, - /* No of CPU cores - cluster0 */ - PLATFORM_MAX_CPUS_PER_CLUSTER, - /* No of CPU cores - cluster1 */ - PLATFORM_MAX_CPUS_PER_CLUSTER -}; - /* sets of MMIO ranges setup */ #define MMIO_RANGE_0_ADDR 0x50000000 #define MMIO_RANGE_1_ADDR 0x60000000 @@ -39,6 +26,8 @@ const unsigned char tegra_power_domain_tree_desc[] = { * Table of regions to map using the MMU. */ static const mmap_region_t tegra_mmap[] = { + MAP_REGION_FLAT(TEGRA_IRAM_BASE, 0x40000, /* 256KB */ + MT_DEVICE | MT_RW | MT_SECURE), MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE, MT_DEVICE | MT_RW | MT_SECURE), MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE, @@ -53,10 +42,43 @@ static const mmap_region_t tegra_mmap[] = { ******************************************************************************/ const mmap_region_t *plat_get_mmio_map(void) { + /* Add the map region for security engine SE2 */ + if (tegra_chipid_is_t210_b01()) { + mmap_add_region((uint64_t)TEGRA_SE2_BASE, + (uint64_t)TEGRA_SE2_BASE, + (uint64_t)TEGRA_SE2_RANGE_SIZE, + MT_DEVICE | MT_RW | MT_SECURE); + } + /* MMIO space */ return tegra_mmap; } +/******************************************************************************* + * The Tegra power domain tree has a single system level power domain i.e. a + * single root node. The first entry in the power domain descriptor specifies + * the number of power domains at the highest power level. + ******************************************************************************* + */ +const unsigned char tegra_power_domain_tree_desc[] = { + /* No of root nodes */ + 1, + /* No of clusters */ + PLATFORM_CLUSTER_COUNT, + /* No of CPU cores - cluster0 */ + PLATFORM_MAX_CPUS_PER_CLUSTER, + /* No of CPU cores - cluster1 */ + PLATFORM_MAX_CPUS_PER_CLUSTER +}; + +/******************************************************************************* + * This function returns the Tegra default topology tree information. 
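+ * The descriptor above encodes one system node, PLATFORM_CLUSTER_COUNT
+ * clusters and PLATFORM_MAX_CPUS_PER_CLUSTER cores per cluster; the generic
+ * PSCI setup code uses it to build the power domain topology at boot.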
+ ******************************************************************************/ +const unsigned char *plat_get_power_domain_tree_desc(void) +{ + return tegra_power_domain_tree_desc; +} + /******************************************************************************* * Handler to get the System Counter Frequency ******************************************************************************/ @@ -93,6 +115,28 @@ uint32_t plat_get_console_from_id(int id) return tegra210_uart_addresses[id]; } +/******************************************************************************* + * Handler for early platform setup + ******************************************************************************/ +void plat_early_platform_setup(void) +{ + const plat_params_from_bl2_t *plat_params = bl31_get_plat_params(); + uint64_t val; + + /* platform parameter passed by the previous bootloader */ + if (plat_params->l2_ecc_parity_prot_dis != 1) { + /* Enable ECC Parity Protection for Cortex-A57 CPUs */ + val = read_l2ctlr_el1(); + val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT; + write_l2ctlr_el1(val); + } + + /* Initialize security engine driver */ + if (tegra_chipid_is_t210_b01()) { + tegra_se_init(); + } +} + /******************************************************************************* * Initialize the GIC and SGIs ******************************************************************************/ diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk index b0a474c88e89c79ef4e1de5f037a0fa71402d722..a9ab0d821a115ab2d3ecede9bc9a3af1092b878b 100644 --- a/plat/nvidia/tegra/soc/t210/platform_t210.mk +++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk @@ -16,18 +16,22 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT)) PLATFORM_MAX_CPUS_PER_CLUSTER := 4 $(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER)) -MAX_XLAT_TABLES := 4 +MAX_XLAT_TABLES := 10 $(eval $(call add_define,MAX_XLAT_TABLES)) -MAX_MMAP_REGIONS := 8 +MAX_MMAP_REGIONS := 10 $(eval $(call add_define,MAX_MMAP_REGIONS)) +PLAT_INCLUDES += -I${SOC_DIR}/drivers/se + BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \ lib/cpus/aarch64/cortex_a57.S \ + ${COMMON_DIR}/drivers/bpmp/bpmp.c \ ${COMMON_DIR}/drivers/flowctrl/flowctrl.c \ ${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \ ${SOC_DIR}/plat_psci_handlers.c \ ${SOC_DIR}/plat_setup.c \ + ${SOC_DIR}/drivers/se/security_engine.c \ ${SOC_DIR}/plat_secondary.c # Enable workarounds for selected Cortex-A57 erratas.