Unverified Commit af4aad2f authored by Antonio Niño Díaz, committed by GitHub

Merge pull request #1733 from vwadekar/tf2.0-tegra-downstream-rebase-1.3.19

Tegra downstream rebase 1.3.19
parents cd1f39b4 0f426f8f
@@ -111,8 +111,8 @@ static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
static uint32_t mce_get_curr_cpu_ari_base(void)
{
uint64_t mpidr = read_mpidr();
uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
uint64_t cpuid = mpidr & MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
/*
* T186 has 2 CPU clusters, one with Denver CPUs and the other with
@@ -131,9 +131,9 @@ static uint32_t mce_get_curr_cpu_ari_base(void)
static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
uint64_t mpidr = read_mpidr();
uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
(uint64_t)MIDR_IMPL_MASK;
uint64_t cpuid = mpidr & MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) &
MIDR_IMPL_MASK;
/*
* T186 has 2 CPU clusters, one with Denver CPUs and the other with
@@ -172,9 +172,6 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
switch (cmd) {
case MCE_CMD_ENTER_CSTATE:
ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
if (ret < 0) {
ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
}
break;
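For reference, the two helpers earlier in this file (mce_get_curr_cpu_ari_base and mce_get_curr_cpu_ops) both tell the T186 clusters apart via the MIDR implementer field, bits [31:24], which reads 0x4E ('N', NVIDIA) on Denver cores and 0x41 ('A', Arm) on Cortex-A57 cores; MPIDR then supplies the core number within the cluster. A minimal sketch of that idea, with illustrative names (the cluster numbering is an assumption for the example, not taken from this commit):

    /* sketch only: derive a cluster index from MIDR */
    #define EX_MIDR_IMPL_SHIFT  24U
    #define EX_MIDR_IMPL_MASK   0xFFU
    #define EX_DENVER_IMPL      0x4EU   /* 'N' - NVIDIA implementer code */

    static uint32_t ex_cluster_from_midr(uint64_t midr)
    {
        uint64_t impl = (midr >> EX_MIDR_IMPL_SHIFT) & EX_MIDR_IMPL_MASK;

        return (impl == EX_DENVER_IMPL) ? 0U : 1U; /* assumed numbering */
    }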
@@ -183,30 +180,22 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
* get the parameters required for the update cstate info
* command
*/
arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));
arg3 = read_ctx_reg(gp_regs, CTX_GPREG_X4);
arg4 = read_ctx_reg(gp_regs, CTX_GPREG_X5);
arg5 = read_ctx_reg(gp_regs, CTX_GPREG_X6);
ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
(uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
(uint32_t)arg4, (uint8_t)arg5);
if (ret < 0) {
ERROR("%s: update_cstate_info failed(%d)\n",
__func__, ret);
}
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));
write_ctx_reg(gp_regs, CTX_GPREG_X4, (0ULL));
write_ctx_reg(gp_regs, CTX_GPREG_X5, (0ULL));
write_ctx_reg(gp_regs, CTX_GPREG_X6, (0ULL));
break;
case MCE_CMD_UPDATE_CROSSOVER_TIME:
ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
if (ret < 0) {
ERROR("%s: update_crossover_time failed(%d)\n",
__func__, ret);
}
break;
@@ -214,61 +203,40 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
/* update context to return cstate stats value */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64));
break;
case MCE_CMD_WRITE_CSTATE_STATS:
ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
if (ret < 0) {
ERROR("%s: write_cstate_stats failed(%d)\n",
__func__, ret);
}
break;
case MCE_CMD_IS_CCX_ALLOWED:
ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
if (ret < 0) {
ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
break;
}
/* update context to return CCx status value */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
(uint64_t)(ret));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret));
break;
case MCE_CMD_IS_SC7_ALLOWED:
ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
if (ret < 0) {
ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
break;
}
/* update context to return SC7 status value */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
(uint64_t)(ret));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
(uint64_t)(ret));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (uint64_t)(ret));
write_ctx_reg(gp_regs, CTX_GPREG_X3, (uint64_t)(ret));
break;
case MCE_CMD_ONLINE_CORE:
ret = ops->online_core(cpu_ari_base, arg0);
if (ret < 0) {
ERROR("%s: online_core failed(%d)\n", __func__, ret);
}
break;
case MCE_CMD_CC3_CTRL:
ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
if (ret < 0) {
ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
}
break;
@@ -277,10 +245,10 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
arg0);
/* update context to return if echo'd data matched source */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
((ret64 == arg0) ? 1ULL : 0ULL));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
((ret64 == arg0) ? 1ULL : 0ULL));
write_ctx_reg(gp_regs, CTX_GPREG_X1, ((ret64 == arg0) ?
1ULL : 0ULL));
write_ctx_reg(gp_regs, CTX_GPREG_X2, ((ret64 == arg0) ?
1ULL : 0ULL));
break;
@@ -292,10 +260,8 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
* version = minor(63:32) | major(31:0). Update context
* to return major and minor version number.
*/
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
(ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
(ret64 >> 32ULL));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X2, (ret64 >> 32ULL));
break;
@@ -304,32 +270,22 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
/* update context to return features value */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
break;
case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
ret = ops->roc_flush_cache_trbits(cpu_ari_base);
if (ret < 0) {
ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
ret);
}
break;
case MCE_CMD_ROC_FLUSH_CACHE:
ret = ops->roc_flush_cache(cpu_ari_base);
if (ret < 0) {
ERROR("%s: flush cache failed(%d)\n", __func__, ret);
}
break;
case MCE_CMD_ROC_CLEAN_CACHE:
ret = ops->roc_clean_cache(cpu_ari_base);
if (ret < 0) {
ERROR("%s: clean cache failed(%d)\n", __func__, ret);
}
break;
@@ -337,9 +293,9 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
/* update context to return MCA data/error */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X2, (arg1));
write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64));
break;
@@ -347,8 +303,8 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
/* update context to return MCA error */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (ret64));
write_ctx_reg(gp_regs, CTX_GPREG_X3, (ret64));
break;
@@ -375,7 +331,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
/* update context to return data */
write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
write_ctx_reg(gp_regs, CTX_GPREG_X1, (arg1));
break;
case MCE_CMD_MISC_CCPLEX:
......
@@ -200,15 +200,14 @@ int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
{
uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
(uint64_t)MIDR_IMPL_MASK;
uint64_t cpu = read_mpidr() & MPIDR_CPU_MASK;
uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
int32_t ret = 0;
(void)ari_base;
/* sanity check core id */
if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
ERROR("%s: unsupported core id (%d)\n", __func__, core);
ret = EINVAL;
} else {
......
@@ -12,6 +12,7 @@
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
@@ -29,32 +30,33 @@ extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_end,
__tegra186_smmu_context;
/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET 16UL
/* state id mask */
#define TEGRA186_STATE_ID_MASK 0xF
#define TEGRA186_STATE_ID_MASK 0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0
#define TEGRA186_WAKE_TIME_SHIFT 4
#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT 4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK 0x180c
#define TEGRA186_CORE_WAKE_MASK 0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE 3
#define TEGRA186_SE_CONTEXT_SIZE 3U
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
static struct t18x_psci_percpu_data {
unsigned int wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];
/* System power down state */
uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
static struct tegra_psci_percpu_data {
uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];
int32_t tegra_soc_validate_power_state(unsigned int power_state,
int32_t tegra_soc_validate_power_state(uint32_t power_state,
psci_power_state_t *req_state)
{
int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
int cpu = plat_my_core_pos();
uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
uint32_t cpu = plat_my_core_pos();
int32_t ret = PSCI_E_SUCCESS;
/* save the core wake time (in TSC ticks)*/
percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
<< TEGRA186_WAKE_TIME_SHIFT;
/*
@@ -64,8 +66,8 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state,
* from DRAM in that function, because the L2 cache is not flushed
* unless the cluster is entering CC6/CC7.
*/
clean_dcache_range((uint64_t)&percpu_data[cpu],
sizeof(percpu_data[cpu]));
clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
sizeof(tegra_percpu_data[cpu]));
/* Sanity check the requested state id */
switch (state_id) {
@@ -80,18 +82,19 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state,
default:
ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
return PSCI_E_INVALID_PARAMS;
ret = PSCI_E_INVALID_PARAMS;
break;
}
return PSCI_E_SUCCESS;
return ret;
}
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
const plat_local_state_t *pwr_domain_state;
unsigned int stateid_afflvl0, stateid_afflvl2;
int cpu = plat_my_core_pos();
plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
uint8_t stateid_afflvl0, stateid_afflvl2;
uint32_t cpu = plat_my_core_pos();
const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
mce_cstate_info_t cstate_info = { 0 };
uint64_t smmu_ctx_base;
uint32_t val;
@@ -109,8 +112,8 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
/* Enter CPU idle/powerdown */
val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
percpu_data[cpu].wake_time, 0);
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
tegra_percpu_data[cpu].wake_time, 0U);
} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
@@ -138,18 +141,20 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
cstate_info.system_state_force = 1;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
/* Loop until system suspend is allowed */
do {
val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
val = (uint32_t)mce_command_handler(
(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE,
0);
} while (val == 0);
0U);
} while (val == 0U);
/* Instruct the MCE to enter system suspend state */
(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
} else {
; /* do nothing */
}
return PSCI_E_SUCCESS;
@@ -159,23 +164,28 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
* Platform handler to calculate the proper target power level at the
* specified affinity level
******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
const plat_local_state_t *states,
unsigned int ncpu)
uint32_t ncpu)
{
plat_local_state_t target = *states;
int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1;
int core_pos = read_mpidr() & MPIDR_CPU_MASK;
uint32_t pos = 0;
plat_local_state_t result = PSCI_LOCAL_STATE_RUN;
uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu;
int32_t ret, cluster_powerdn = 1;
uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
mce_cstate_info_t cstate_info = { 0 };
/* get the power state at this level */
if (lvl == MPIDR_AFFLVL1)
target = *(states + core_pos);
if (lvl == MPIDR_AFFLVL2)
target = *(states + cpu);
if (lvl == (uint32_t)MPIDR_AFFLVL1) {
target = states[core_pos];
}
if (lvl == (uint32_t)MPIDR_AFFLVL2) {
target = states[cpu];
}
/* CPU suspend */
if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {
if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {
/* Program default wake mask */
cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
@@ -183,25 +193,29 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
mce_update_cstate_info(&cstate_info);
/* Check if CCx state is allowed. */
ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time,
0);
if (ret)
return PSTATE_ID_CORE_POWERDN;
ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time,
0U);
if (ret != 0) {
result = PSTATE_ID_CORE_POWERDN;
}
}
/* CPU off */
if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {
if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {
/* find out the number of ON cpus in the cluster */
do {
target = *states++;
if (target != PLAT_MAX_OFF_STATE)
target = states[pos];
if (target != PLAT_MAX_OFF_STATE) {
cluster_powerdn = 0;
} while (--ncpu);
}
--num_cpu;
pos++;
} while (num_cpu != 0U);
/* Enable cluster powerdn from last CPU in the cluster */
if (cluster_powerdn) {
if (cluster_powerdn != 0) {
/* Enable CC7 state and turn off wake mask */
cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
@@ -209,12 +223,13 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
mce_update_cstate_info(&cstate_info);
/* Check if CCx state is allowed. */
ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE,
0);
if (ret)
return PSTATE_ID_CORE_POWERDN;
0U);
if (ret != 0) {
result = PSTATE_ID_CORE_POWERDN;
}
} else {
@@ -225,20 +240,21 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
}
/* System Suspend */
if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
(target == PSTATE_ID_SOC_POWERDN))
return PSTATE_ID_SOC_POWERDN;
if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) &&
(target == PSTATE_ID_SOC_POWERDN)) {
result = PSTATE_ID_SOC_POWERDN;
}
/* default state */
return PSCI_LOCAL_STATE_RUN;
return result;
}
int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
const plat_local_state_t *pwr_domain_state =
target_state->pwr_domain_state;
plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
TEGRA186_STATE_ID_MASK;
uint64_t val;
@@ -250,7 +266,7 @@ int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
*/
val = params_from_bl2->tzdram_base +
((uintptr_t)&__tegra186_cpu_reset_handler_end -
(uintptr_t)tegra186_cpu_reset_handler);
(uintptr_t)&tegra186_cpu_reset_handler);
memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
(uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
}
@@ -258,30 +274,49 @@ int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_on(u_register_t mpidr)
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
uint32_t target_cpu = mpidr & MPIDR_CPU_MASK;
uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
MPIDR_AFFINITY_BITS;
(uint64_t)MPIDR_AFFINITY_BITS;
int32_t ret = PSCI_E_SUCCESS;
if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {
if (target_cluster > MPIDR_AFFLVL1) {
ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
return PSCI_E_NOT_PRESENT;
}
ret = PSCI_E_NOT_PRESENT;
/* construct the target CPU # */
target_cpu |= (target_cluster << 2);
} else {
/* construct the target CPU # */
target_cpu |= (target_cluster << 2);
mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
}
return PSCI_E_SUCCESS;
return ret;
}
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
mce_cstate_info_t cstate_info = { 0 };
uint64_t impl, val;
const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
/*
* Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
* A02p and beyond).
*/
if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
(impl != (uint64_t)DENVER_IMPL)) {
val = read_l2ctlr_el1();
val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
write_l2ctlr_el1(val);
}
/*
* Reset power state info for CPUs when onlining, we set
@@ -328,65 +363,28 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
return PSCI_E_SUCCESS;
}
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
(void)target_state;
/* Disable Denver's DCO operations */
if (impl == DENVER_IMPL)
if (impl == DENVER_IMPL) {
denver_disable_dco();
}
/* Turn off CPU */
(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE, 0);
(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE, 0U);
return PSCI_E_SUCCESS;
}
__dead2 void tegra_soc_prepare_system_off(void)
{
mce_cstate_info_t cstate_info = { 0 };
uint32_t val;
if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {
/* power off the entire system */
mce_enter_ccplex_state(tegra186_system_powerdn_state);
} else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {
/* Prepare for quasi power down */
cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
cstate_info.system = TEGRA_ARI_SYSTEM_SC8;
cstate_info.system_state_force = 1;
cstate_info.update_wake_mask = 1;
mce_update_cstate_info(&cstate_info);
/* loop until other CPUs power down */
do {
val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
TEGRA_ARI_CORE_C7,
MCE_CORE_SLEEP_TIME_INFINITE,
0);
} while (val == 0);
/* Enter quasi power down state */
(void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
/* disable GICC */
tegra_gic_cpuif_deactivate();
/* power down core */
prepare_cpu_pwr_dwn();
/* flush L1/L2 data caches */
dcsw_op_all(DCCISW);
} else {
ERROR("%s: unsupported power down state (%d)\n", __func__,
tegra186_system_powerdn_state);
}
/* power off the entire system */
mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
wfi();
@@ -396,7 +394,7 @@ __dead2 void tegra_soc_prepare_system_off(void)
}
}
int tegra_soc_prepare_system_reset(void)
int32_t tegra_soc_prepare_system_reset(void)
{
mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
......
@@ -14,14 +14,13 @@
#include <tegra_def.h>
#include <tegra_private.h>
#define MISCREG_CPU_RESET_VECTOR 0x2000
#define MISCREG_AA64_RST_LOW 0x2004
#define MISCREG_AA64_RST_HIGH 0x2008
#define MISCREG_AA64_RST_LOW 0x2004U
#define MISCREG_AA64_RST_HIGH 0x2008U
#define SCRATCH_SECURE_RSV1_SCRATCH_0 0x658
#define SCRATCH_SECURE_RSV1_SCRATCH_1 0x65C
#define SCRATCH_SECURE_RSV1_SCRATCH_0 0x658U
#define SCRATCH_SECURE_RSV1_SCRATCH_1 0x65CU
#define CPU_RESET_MODE_AA64 1
#define CPU_RESET_MODE_AA64 1U
extern void memcpy16(void *dest, const void *src, unsigned int length);
@@ -34,7 +33,7 @@ extern uint64_t __tegra186_cpu_reset_handler_end;
void plat_secondary_setup(void)
{
uint32_t addr_low, addr_high;
plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
uint64_t cpu_reset_handler_base;
INFO("Setting up secondary CPU boot\n");
@@ -58,7 +57,7 @@ void plat_secondary_setup(void)
}
addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff);
addr_high = (uint32_t)((cpu_reset_handler_base >> 32U) & 0x7ffU);
/* write lower 32 bits first, then the upper 11 bits */
mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
@@ -71,5 +70,5 @@ void plat_secondary_setup(void)
addr_high);
/* update reset vector address to the CCPLEX */
mce_update_reset_vector();
(void)mce_update_reset_vector();
}
@@ -27,15 +27,12 @@
#include <tegra_platform.h>
#include <tegra_private.h>
DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1)
extern uint64_t tegra_enable_l2_ecc_parity_prot;
/*******************************************************************************
* Tegra186 CPU numbers in cluster #0
*******************************************************************************
*/
#define TEGRA186_CLUSTER0_CORE2 2
#define TEGRA186_CLUSTER0_CORE3 3
#define TEGRA186_CLUSTER0_CORE2 2U
#define TEGRA186_CLUSTER0_CORE3 3U
/*******************************************************************************
* The Tegra power domain tree has a single system level power domain i.e. a
@@ -43,7 +40,7 @@ extern uint64_t tegra_enable_l2_ecc_parity_prot;
* the number of power domains at the highest power level.
*******************************************************************************
*/
const unsigned char tegra_power_domain_tree_desc[] = {
const uint8_t tegra_power_domain_tree_desc[] = {
/* No of root nodes */
1,
/* No of clusters */
@@ -54,45 +51,53 @@ const unsigned char tegra_power_domain_tree_desc[] = {
PLATFORM_MAX_CPUS_PER_CLUSTER
};
/*******************************************************************************
* This function returns the Tegra default topology tree information.
******************************************************************************/
const uint8_t *plat_get_power_domain_tree_desc(void)
{
return tegra_power_domain_tree_desc;
}
/*
* Table of regions to map using the MMU.
*/
static const mmap_region_t tegra_mmap[] = {
MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000, /* 128KB */
MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000U, /* 128KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB - UART A, B*/
MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000U, /* 128KB - UART A, B*/
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000, /* 128KB - UART C, G */
MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000U, /* 128KB - UART C, G */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000, /* 192KB - UART D, E, F */
MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000U, /* 192KB - UART D, E, F */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */
MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000U, /* 128KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000, /* 256KB */
MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000U, /* 256KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000, /* 64KB */
MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */
MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000U, /* 384KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000, /* 128KB - ARM/Denver */
MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000U, /* 128KB - ARM/Denver */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000, /* 64KB */
MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000U, /* 64KB */
MT_DEVICE | MT_RW | MT_SECURE),
{0}
};
@@ -109,7 +114,7 @@ const mmap_region_t *plat_get_mmio_map(void)
/*******************************************************************************
* Handler to get the System Counter Frequency
******************************************************************************/
unsigned int plat_get_syscnt_freq2(void)
uint32_t plat_get_syscnt_freq2(void)
{
return 31250000;
}
@@ -136,56 +141,42 @@ static uint32_t tegra186_uart_addresses[TEGRA186_MAX_UART_PORTS + 1] = {
/*******************************************************************************
* Retrieve the UART controller base to be used as the console
******************************************************************************/
uint32_t plat_get_console_from_id(int id)
uint32_t plat_get_console_from_id(int32_t id)
{
if (id > TEGRA186_MAX_UART_PORTS)
return 0;
uint32_t ret;
return tegra186_uart_addresses[id];
}
if (id > TEGRA186_MAX_UART_PORTS) {
ret = 0;
} else {
ret = tegra186_uart_addresses[id];
}
/* represent chip-version as concatenation of major (15:12), minor (11:8) and subrev (7:0) */
#define TEGRA186_VER_A02P 0x1201
return ret;
}
/*******************************************************************************
* Handler for early platform setup
******************************************************************************/
void plat_early_platform_setup(void)
{
int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
uint32_t chip_subrev, val;
uint64_t impl, val;
const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
/* sanity check MCE firmware compatibility */
mce_verify_firmware_version();
impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
/*
* Enable ECC and Parity Protection for Cortex-A57 CPUs
* for Tegra A02p SKUs
* Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
* A02p and beyond).
*/
if (impl != DENVER_IMPL) {
/* get the major, minor and sub-version values */
chip_subrev = mmio_read_32(TEGRA_FUSE_BASE + OPT_SUBREVISION) &
SUBREVISION_MASK;
/* prepare chip version number */
val = (tegra_get_chipid_major() << 12) |
(tegra_get_chipid_minor() << 8) |
chip_subrev;
if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
(impl != (uint64_t)DENVER_IMPL)) {
/* enable L2 ECC for Tegra186 A02P and beyond */
if (val >= TEGRA186_VER_A02P) {
val = read_l2ctlr_el1();
val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
write_l2ctlr_el1(val);
/*
* Set the flag to enable ECC/Parity Protection
* when we exit System Suspend or Cluster Powerdn
*/
tegra_enable_l2_ecc_parity_prot = 1;
}
val = read_l2ctlr_el1();
val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
write_l2ctlr_el1(val);
}
}
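The revision check deleted above gated the L2 ECC enable on the chip version, packed as described by the comment near plat_get_console_from_id: major in bits [15:12], minor in [11:8], subrevision in [7:0], so TEGRA186_VER_A02P == 0x1201 decodes as major 1, minor 2, subrev 0x01, i.e. A02p. A sketch of that packing (helper name is ours):

    /* sketch: compose the chip-version word the deleted check compared against */
    static uint32_t ex_chip_version(uint32_t major, uint32_t minor, uint32_t subrev)
    {
        return (major << 12) | (minor << 8) | (subrev & 0xFFU);
    }
    /* ex_chip_version(1U, 2U, 0x01U) == 0x1201 == TEGRA186_VER_A02P */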
@@ -208,8 +199,9 @@ void plat_gic_setup(void)
* Initialize the FIQ handler only if the platform supports any
* FIQ interrupt sources.
*/
if (sizeof(tegra186_interrupt_props) > 0)
if (sizeof(tegra186_interrupt_props) > 0U) {
tegra_fiq_handler_setup();
}
}
/*******************************************************************************
@@ -242,33 +234,34 @@ plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
* to convert an MPIDR to a unique linear index. An error code (-1) is returned
* in case the MPIDR is invalid.
******************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned int cluster_id, cpu_id, pos;
u_register_t cluster_id, cpu_id, pos;
int32_t ret;
cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
cluster_id = (mpidr >> (u_register_t)MPIDR_AFF1_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> (u_register_t)MPIDR_AFF0_SHIFT) & (u_register_t)MPIDR_AFFLVL_MASK;
/*
* Validate cluster_id by checking whether it represents
* one of the two clusters present on the platform.
*/
if (cluster_id >= PLATFORM_CLUSTER_COUNT)
return PSCI_E_NOT_PRESENT;
/*
* Validate cpu_id by checking whether it represents a CPU in
* one of the two clusters present on the platform.
*/
if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
return PSCI_E_NOT_PRESENT;
/* calculate the core position */
pos = cpu_id + (cluster_id << 2);
/* check for non-existent CPUs */
if (pos == TEGRA186_CLUSTER0_CORE2 || pos == TEGRA186_CLUSTER0_CORE3)
return PSCI_E_NOT_PRESENT;
if ((cluster_id >= (u_register_t)PLATFORM_CLUSTER_COUNT) ||
(cpu_id >= (u_register_t)PLATFORM_MAX_CPUS_PER_CLUSTER)) {
ret = PSCI_E_NOT_PRESENT;
} else {
/* calculate the core position */
pos = cpu_id + (cluster_id << 2U);
/* check for non-existent CPUs */
if ((pos == TEGRA186_CLUSTER0_CORE2) || (pos == TEGRA186_CLUSTER0_CORE3)) {
ret = PSCI_E_NOT_PRESENT;
} else {
ret = (int32_t)pos;
}
}
return pos;
return ret;
}
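To make the linear-index math above concrete: pos = cpu_id + (cluster_id << 2), and positions 2 and 3 in cluster 0 are rejected because that cluster (the Denver cluster on T186) only exposes two cores. Sample mappings under the formula, derived from the code above:

    /* cluster 0, cpu 0 -> pos 0               (valid)
     * cluster 0, cpu 2 -> pos 2               (TEGRA186_CLUSTER0_CORE2, not present)
     * cluster 1, cpu 3 -> pos 7               (valid)
     * cluster 2, any   -> PSCI_E_NOT_PRESENT  (cluster_id >= PLATFORM_CLUSTER_COUNT)
     */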
@@ -20,8 +20,6 @@
#include <t18x_ari.h>
#include <tegra_private.h>
extern uint32_t tegra186_system_powerdn_state;
/*******************************************************************************
* Offset to read the ref_clk counter value
******************************************************************************/
@@ -30,7 +28,6 @@ extern uint32_t tegra186_system_powerdn_state;
/*******************************************************************************
* Tegra186 SiP SMCs
******************************************************************************/
#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE 0xC2FFFE01
#define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS 0xC2FFFE02
#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE 0xC2FFFF00
#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO 0xC2FFFF01
@@ -60,7 +57,7 @@ int plat_sip_handler(uint32_t smc_fid,
uint64_t x2,
uint64_t x3,
uint64_t x4,
void *cookie,
const void *cookie,
void *handle,
uint64_t flags)
{
@@ -115,33 +112,6 @@ int plat_sip_handler(uint32_t smc_fid,
return 0;
case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE:
/* clean up the high bits */
x1 = (uint32_t)x1;
/*
* SC8 is a special Tegra186 system state where the CPUs and
* DRAM are powered down but the other subsystems are still
* alive.
*/
if ((x1 == TEGRA_ARI_SYSTEM_SC8) ||
(x1 == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF)) {
tegra186_system_powerdn_state = x1;
flush_dcache_range(
(uintptr_t)&tegra186_system_powerdn_state,
sizeof(tegra186_system_powerdn_state));
} else {
ERROR("%s: unhandled powerdn state (%d)\n", __func__,
(uint32_t)x1);
return -ENOTSUP;
}
return 0;
/*
* This function ID reads the Activity monitor's core/ref clock
* counter values for a core/cluster.
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SE_PRIVATE_H
#define SE_PRIVATE_H
#include <stdbool.h>
#include <security_engine.h>
/*
* PMC registers
*/
/* Secure scratch registers */
#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
#define PMC_SECURE_SCRATCH6_OFFSET 0x224U
#define PMC_SECURE_SCRATCH7_OFFSET 0x228U
#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U
#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU
#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U
#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U
/*
* AHB arbitration memory write queue
*/
#define ARAHB_MEM_WRQUE_MST_ID_OFFSET 0xFCU
#define ARAHB_MST_ID_SE2_MASK (0x1U << 13)
#define ARAHB_MST_ID_SE_MASK (0x1U << 14)
/* SE Status register */
#define SE_STATUS_OFFSET 0x800U
#define SE_STATUS_SHIFT 0
#define SE_STATUS_IDLE \
((0U) << SE_STATUS_SHIFT)
#define SE_STATUS_BUSY \
((1U) << SE_STATUS_SHIFT)
#define SE_STATUS(x) \
((x) & ((0x3U) << SE_STATUS_SHIFT))
/* SE config register */
#define SE_CONFIG_REG_OFFSET 0x14U
#define SE_CONFIG_ENC_ALG_SHIFT 12
#define SE_CONFIG_ENC_ALG_AES_ENC \
((1U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_RNG \
((2U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_SHA \
((3U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_RSA \
((4U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_NOP \
((0U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG(x) \
((x) & ((0xFU) << SE_CONFIG_ENC_ALG_SHIFT))
#define SE_CONFIG_DEC_ALG_SHIFT 8
#define SE_CONFIG_DEC_ALG_AES \
((1U) << SE_CONFIG_DEC_ALG_SHIFT)
#define SE_CONFIG_DEC_ALG_NOP \
((0U) << SE_CONFIG_DEC_ALG_SHIFT)
#define SE_CONFIG_DEC_ALG(x) \
((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT))
#define SE_CONFIG_DST_SHIFT 2
#define SE_CONFIG_DST_MEMORY \
((0U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_HASHREG \
((1U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_KEYTAB \
((2U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_SRK \
((3U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_RSAREG \
((4U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST(x) \
((x) & ((0x7U) << SE_CONFIG_DST_SHIFT))
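These field macros compose by OR-ing one value per field into a single register word, while the extractor forms (SE_CONFIG_ENC_ALG(x) and friends) mask a field back out for comparison. A typical composition, lifted from the context-save path later in this commit:

    /* program SE for an RNG-to-memory operation, decrypt path idle */
    val = (SE_CONFIG_ENC_ALG_RNG |
           SE_CONFIG_DEC_ALG_NOP |
           SE_CONFIG_DST_MEMORY);
    tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);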
/* DRBG random number generator config */
#define SE_RNG_CONFIG_REG_OFFSET 0x340
#define DRBG_MODE_SHIFT 0
#define DRBG_MODE_NORMAL \
((0UL) << DRBG_MODE_SHIFT)
#define DRBG_MODE_FORCE_INSTANTION \
((1UL) << DRBG_MODE_SHIFT)
#define DRBG_MODE_FORCE_RESEED \
((2UL) << DRBG_MODE_SHIFT)
#define SE_RNG_CONFIG_MODE(x) \
((x) & ((0x3UL) << DRBG_MODE_SHIFT))
#define DRBG_SRC_SHIFT 2
#define DRBG_SRC_NONE \
((0UL) << DRBG_SRC_SHIFT)
#define DRBG_SRC_ENTROPY \
((1UL) << DRBG_SRC_SHIFT)
#define DRBG_SRC_LFSR \
((2UL) << DRBG_SRC_SHIFT)
#define SE_RNG_SRC_CONFIG_MODE(x) \
((x) & ((0x3UL) << DRBG_SRC_SHIFT))
/* DRBG random number generator entropy config */
#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U
#define DRBG_RO_ENT_SRC_SHIFT 1
#define DRBG_RO_ENT_SRC_ENABLE \
((1U) << DRBG_RO_ENT_SRC_SHIFT)
#define DRBG_RO_ENT_SRC_DISABLE \
((0U) << DRBG_RO_ENT_SRC_SHIFT)
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \
((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT))
#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
#define DRBG_RO_ENT_SRC_LOCK_ENABLE \
((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
#define DRBG_RO_ENT_SRC_LOCK_DISABLE \
((0U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x) \
((x) & ((0x1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT))
#define DRBG_RO_ENT_IGNORE_MEM_SHIFT 12
#define DRBG_RO_ENT_IGNORE_MEM_ENABLE \
((1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
#define DRBG_RO_ENT_IGNORE_MEM_DISABLE \
((0U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \
((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT))
/* SE OPERATION */
#define SE_OPERATION_REG_OFFSET 0x8U
#define SE_OPERATION_SHIFT 0
#define SE_OP_ABORT \
((0x0U) << SE_OPERATION_SHIFT)
#define SE_OP_START \
((0x1U) << SE_OPERATION_SHIFT)
#define SE_OP_RESTART \
((0x2U) << SE_OPERATION_SHIFT)
#define SE_OP_CTX_SAVE \
((0x3U) << SE_OPERATION_SHIFT)
#define SE_OP_RESTART_IN \
((0x4U) << SE_OPERATION_SHIFT)
#define SE_OPERATION(x) \
((x) & ((0x7U) << SE_OPERATION_SHIFT))
/* SE_CTX_SAVE_AUTO */
#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U
/* Enable */
#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0
#define SE_CTX_SAVE_AUTO_DIS \
((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
#define SE_CTX_SAVE_AUTO_EN \
((1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
#define SE_CTX_SAVE_AUTO_ENABLE(x) \
((x) & ((0x1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT))
/* Lock */
#define SE_CTX_SAVE_AUTO_LOCK_SHIFT 8
#define SE_CTX_SAVE_AUTO_LOCK_EN \
((1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
#define SE_CTX_SAVE_AUTO_LOCK_DIS \
((0U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
#define SE_CTX_SAVE_AUTO_LOCK(x) \
((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT))
/* Current context save number of blocks */
#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16
#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU
#define SE_CTX_SAVE_GET_BLK_COUNT(x) \
(((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
SE_CTX_SAVE_AUTO_CURR_CNT_MASK)
#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133
#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646
/* SE TZRAM OPERATION - only for SE1 */
#define SE_TZRAM_OPERATION 0x540U
#define SE_TZRAM_OP_MODE_SHIFT 1
#define SE_TZRAM_OP_MODE_SAVE \
((0U) << SE_TZRAM_OP_MODE_SHIFT)
#define SE_TZRAM_OP_MODE_RESTORE \
((1U) << SE_TZRAM_OP_MODE_SHIFT)
#define SE_TZRAM_OP_MODE(x) \
((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT))
#define SE_TZRAM_OP_BUSY_SHIFT 2
#define SE_TZRAM_OP_BUSY_OFF \
((0U) << SE_TZRAM_OP_BUSY_SHIFT)
#define SE_TZRAM_OP_BUSY_ON \
((1U) << SE_TZRAM_OP_BUSY_SHIFT)
#define SE_TZRAM_OP_BUSY(x) \
((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT))
#define SE_TZRAM_OP_REQ_SHIFT 0
#define SE_TZRAM_OP_REQ_IDLE \
((0U) << SE_TZRAM_OP_REQ_SHIFT)
#define SE_TZRAM_OP_REQ_INIT \
((1U) << SE_TZRAM_OP_REQ_SHIFT)
#define SE_TZRAM_OP_REQ(x) \
((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT))
/* SE Interrupt */
#define SE_INT_STATUS_REG_OFFSET 0x10U
#define SE_INT_OP_DONE_SHIFT 4
#define SE_INT_OP_DONE_CLEAR \
((0U) << SE_INT_OP_DONE_SHIFT)
#define SE_INT_OP_DONE_ACTIVE \
((1U) << SE_INT_OP_DONE_SHIFT)
#define SE_INT_OP_DONE(x) \
((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT))
/* SE error status */
#define SE_ERR_STATUS_REG_OFFSET 0x804U
/* SE linked list (LL) register */
#define SE_IN_LL_ADDR_REG_OFFSET 0x18U
#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U
#define SE_BLOCK_COUNT_REG_OFFSET 0x318U
/* AES data sizes */
#define TEGRA_SE_AES_BLOCK_SIZE 16
#define TEGRA_SE_AES_MIN_KEY_SIZE 16
#define TEGRA_SE_AES_MAX_KEY_SIZE 32
#define TEGRA_SE_AES_IV_SIZE 16
/*******************************************************************************
* Inline functions definition
******************************************************************************/
static inline uint32_t tegra_se_read_32(const tegra_se_dev_t *dev, uint32_t offset)
{
return mmio_read_32(dev->se_base + offset);
}
static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, uint32_t val)
{
mmio_write_32(dev->se_base + offset, val);
}
/*******************************************************************************
* Prototypes
******************************************************************************/
#endif /* SE_PRIVATE_H */
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>
/*******************************************************************************
* Constants and Macros
******************************************************************************/
#define TIMEOUT_100MS 100UL // Timeout in 100ms
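This constant bounds every polling loop in the driver: each iteration sleeps 1 ms via mdelay(1), so 100 iterations approximate the advertised 100 ms. The recurring pattern, condensed from tegra_se_operation_complete below (the declarations come from the surrounding function):

    val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
    for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
            (timeout < TIMEOUT_100MS); timeout++) {
        mdelay(1);
        val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
    }
    if (timeout == TIMEOUT_100MS) {
        ret = -ETIMEDOUT;
    }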
/*******************************************************************************
* Data structure and global variables
******************************************************************************/
/* The security engine contexts are formatted as follows:
*
* SE1 CONTEXT:
* #--------------------------------#
* | Random Data 1 Block |
* #--------------------------------#
* | Sticky Bits 2 Blocks |
* #--------------------------------#
* | Key Table 64 Blocks |
* | For each Key (x16): |
* | Key: 2 Blocks |
* | Original-IV: 1 Block |
* | Updated-IV: 1 Block |
* #--------------------------------#
* | RSA Keys 64 Blocks |
* #--------------------------------#
* | Known Pattern 1 Block |
* #--------------------------------#
*
* SE2/PKA1 CONTEXT:
* #--------------------------------#
* | Random Data 1 Block |
* #--------------------------------#
* | Sticky Bits 2 Blocks |
* #--------------------------------#
* | Key Table 64 Blocks |
* | For each Key (x16): |
* | Key: 2 Blocks |
* | Original-IV: 1 Block |
* | Updated-IV: 1 Block |
* #--------------------------------#
* | RSA Keys 64 Blocks |
* #--------------------------------#
* | PKA sticky bits 1 Block |
* #--------------------------------#
* | PKA keys 512 Blocks |
* #--------------------------------#
* | Known Pattern 1 Block |
* #--------------------------------#
*/
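One consistency check falls straight out of this diagram: the key table holds 16 key slots of (2 + 1 + 1) blocks each, matching the 64 blocks shown. Expressed as illustrative constants (names are ours, not the driver's):

    #define EX_SE_KEY_SLOTS        16U
    #define EX_SE_BLOCKS_PER_SLOT  (2U + 1U + 1U) /* key + original IV + updated IV */
    #define EX_SE_KEYTABLE_BLOCKS  (EX_SE_KEY_SLOTS * EX_SE_BLOCKS_PER_SLOT) /* = 64U */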
/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;
/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;
/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
.se_num = 1,
/* setup base address for se */
.se_base = TEGRA_SE1_BASE,
/* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
/* Setup SRC buffers for SE operations */
.src_ll_buf = &se1_src_ll_buf,
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se1_dst_ll_buf,
};
/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
.se_num = 2,
/* setup base address for se */
.se_base = TEGRA_SE2_BASE,
/* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
/* Setup SRC buffers for SE operations */
.src_ll_buf = &se2_src_ll_buf,
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se2_dst_ll_buf,
};
/*******************************************************************************
* Functions Definition
******************************************************************************/
static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
sizeof(tegra_se_io_lst_t));
flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
sizeof(tegra_se_io_lst_t));
}
/*
* Check that SE operation has completed after kickoff
* This function is invoked after an SE operation has been started,
* and it checks the following conditions:
* 1. SE_INT_STATUS = SE_OP_DONE
* 2. SE_STATUS = IDLE
* 3. AHB bus data transfer complete.
* 4. SE_ERR_STATUS is clean.
*/
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int32_t ret = 0;
uint32_t timeout;
/* Poll the SE interrupt register to ensure H/W operation complete */
val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
(timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: Atomic context save operation timeout!\n",
__func__);
ret = -ETIMEDOUT;
}
/* Poll the SE status idle to ensure H/W operation complete */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
timeout++) {
mdelay(1);
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: MEM_INTERFACE and SE state "
"idle state timeout.\n", __func__);
ret = -ETIMEDOUT;
}
}
/* Check AHB bus transfer complete */
if (ret == 0) {
val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
(timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: SE write over AHB timeout.\n", __func__);
ret = -ETIMEDOUT;
}
}
/* Ensure that no errors are thrown during operation */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
if (val != 0U) {
ERROR("%s: error during SE operation! 0x%x", __func__, val);
ret = -ENOTSUP;
}
}
return ret;
}
/*
* Verify the SE context save auto has been enabled.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
* If the SE context save auto is not enabled, then set
* the context save auto enable and lock the setting.
* If the SE context save auto is not enabled and the
* enable setting is locked, then return an error.
*/
static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev)
{
uint32_t val;
int32_t ret = 0;
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) {
if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
__func__);
ret = -EACCES;
}
/* Program SE_CTX_SAVE_AUTO */
if (ret == 0) {
tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
SE_CTX_SAVE_AUTO_LOCK_EN |
SE_CTX_SAVE_AUTO_EN);
}
}
return ret;
}
/*
* Wait for SE engine to be idle and clear pending interrupts before
* starting the next SE operation.
*/
static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
{
int32_t ret = 0;
uint32_t val = 0;
uint32_t timeout;
/* Wait for previous operation to finish */
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: SE status is not idle!\n", __func__);
ret = -ETIMEDOUT;
}
/* Clear any pending interrupts from previous operation */
val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
return ret;
}
/*
* SE atomic context save. At SC7 entry, the SE driver triggers
* the operation and the hardware performs the context save automatically.
*/
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
int32_t ret = 0;
uint32_t val = 0;
uint32_t blk_count_limit = 0;
uint32_t block_count;
/* Check that previous operation is finalized */
ret = tegra_se_operation_prepare(se_dev);
/* Ensure HW atomic context save has been enabled
* This should have been done at boot time.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
*/
if (ret == 0) {
ret = tegra_se_ctx_save_auto_enable(se_dev);
}
/* Read the context save progress counter: block_count
* Ensure no previous context save has been triggered
* SE_CTX_SAVE_AUTO.CURR_CNT == 0
*/
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
if (block_count != 0U) {
ERROR("%s: ctx_save triggered multiple times\n",
__func__);
ret = -EALREADY;
}
}
/* Set the destination block count for when the context save completes */
if (ret == 0) {
blk_count_limit = block_count + se_dev->ctx_size_blks;
}
/* Program SE_CONFIG register as for RNG operation
* SE_CONFIG.ENC_ALG = RNG
* SE_CONFIG.DEC_ALG = NOP
* SE_CONFIG.ENC_MODE is ignored
* SE_CONFIG.DEC_MODE is ignored
* SE_CONFIG.DST = MEMORY
*/
if (ret == 0) {
val = (SE_CONFIG_ENC_ALG_RNG |
SE_CONFIG_DEC_ALG_NOP |
SE_CONFIG_DST_MEMORY);
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
tegra_se_make_data_coherent(se_dev);
/* SE_CTX_SAVE operation */
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
SE_OP_CTX_SAVE);
ret = tegra_se_operation_complete(se_dev);
}
/* Check that context has written the correct number of blocks */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
ERROR("%s: expected %d blocks but %d were written\n",
__func__, blk_count_limit, val);
ret = -ECANCELED;
}
}
return ret;
}
/*
* Security engine primitive operations, including normal operation
* and the context save operation.
*/
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
int ret = 0;
assert(se_dev);
/* Use device buffers for in and out */
tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
/* Check that previous operation is finalized */
ret = tegra_se_operation_prepare(se_dev);
if (ret != 0) {
goto op_error;
}
/* Program SE operation size */
if (nblocks) {
tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
}
/* Make SE LL data coherent before the SE operation */
tegra_se_make_data_coherent(se_dev);
/* Start hardware operation */
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
/* Wait for operation to finish */
ret = tegra_se_operation_complete(se_dev);
op_error:
return ret;
}
/*
* Security Engine sequence to generate the SRK.
* SE1 and SE2 will generate different SRKs from different
* entropy seeds.
*/
static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
{
int ret = PSCI_E_INTERN_FAIL;
uint32_t val;
/* Configure the following hardware register settings:
* SE_CONFIG.DEC_ALG = NOP
* SE_CONFIG.ENC_ALG = RNG
* SE_CONFIG.DST = SRK
* SE_OPERATION.OP = START
* SE_CRYPTO_LAST_BLOCK = 0
*/
se_dev->src_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
/* Configure random number generator */
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
/* Configure output destination = SRK */
val = (SE_CONFIG_ENC_ALG_RNG |
SE_CONFIG_DEC_ALG_NOP |
SE_CONFIG_DST_SRK);
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
/* Perform hardware operation */
ret = tegra_se_perform_operation(se_dev, 0);
return ret;
}
/*
* Initialize the SE engine handle
*/
void tegra_se_init(void)
{
INFO("%s: start SE init\n", __func__);
/* Generate random SRK to initialize DRBG */
tegra_se_generate_srk(&se_dev_1);
tegra_se_generate_srk(&se_dev_2);
INFO("%s: SE init done\n", __func__);
}
/*
* Security engine power suspend entry point.
* This function is invoked from the PSCI power domain suspend handler.
*/
int32_t tegra_se_suspend(void)
{
int32_t ret = 0;
/* Atomic context save se2 and pka1 */
INFO("%s: SE2/PKA1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_2);
/* Atomic context save se */
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE atomic context save done\n", __func__);
}
return ret;
}
/*
* Save TZRAM to shadow TZRAM in AON
*/
int32_t tegra_se_save_tzram(void)
{
uint32_t val = 0;
int32_t ret = 0;
uint32_t timeout;
INFO("%s: SE TZRAM save start\n", __func__);
val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
(timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
ret = -ETIMEDOUT;
}
if (ret == 0) {
INFO("%s: SE TZRAM save done!\n", __func__);
}
return ret;
}
/*
* This function is invoked on SE resume
*/
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
uint32_t val;
assert(se_dev);
/* Lock RNG source to ENTROPY on resume */
val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
DRBG_RO_ENT_SRC_LOCK_ENABLE |
DRBG_RO_ENT_SRC_ENABLE;
tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
/* Enable and lock the SE atomic context save setting */
if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
ERROR("%s: ERR: enable SE%d context save auto failed!\n",
__func__, se_dev->se_num);
}
/* Set a random value to SRK to initialize DRBG */
tegra_se_generate_srk(se_dev);
}
/*
* This function is invoked on SC7 resume
*/
void tegra_se_resume(void)
{
tegra_se_warm_boot_resume(&se_dev_1);
tegra_se_warm_boot_resume(&se_dev_2);
}
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <platform_def.h>
#include <cortex_a57.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
@@ -15,10 +13,14 @@
#include <lib/psci/psci.h>
#include <plat/common/platform.h>
#include <bpmp.h>
#include <flowctrl.h>
#include <pmc.h>
#include <platform_def.h>
#include <security_engine.h>
#include <tegra_def.h>
#include <tegra_private.h>
#include <tegra_platform.h>
/*
* Register used to clear CPU reset signals. Each CPU has two reset
@@ -55,7 +57,7 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state,
* Cluster powerdown/idle request only for afflvl 1
*/
req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
req_state->pwr_domain_state[MPIDR_AFFLVL0] = PSTATE_ID_CORE_POWERDN;
break;
@@ -87,9 +89,11 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu)
{
plat_local_state_t target = *states;
plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
int cpu = plat_my_core_pos();
int core_pos = read_mpidr() & MPIDR_CPU_MASK;
uint32_t bpmp_reply, data[3];
int ret;
/* get the power state at this level */
if (lvl == MPIDR_AFFLVL1)
@@ -97,19 +101,57 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
if (lvl == MPIDR_AFFLVL2)
target = *(states + cpu);
/* Cluster idle/power-down */
if ((lvl == MPIDR_AFFLVL1) && ((target == PSTATE_ID_CLUSTER_IDLE) ||
(target == PSTATE_ID_CLUSTER_POWERDN))) {
return target;
}
if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_IDLE)) {
/* initialize the bpmp interface */
(void)tegra_bpmp_init();
/* Cluster idle */
data[0] = (uint32_t)cpu;
data[1] = TEGRA_PM_CC6;
data[2] = TEGRA_PM_SC1;
ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
(void *)&data, (int)sizeof(data),
(void *)&bpmp_reply, (int)sizeof(bpmp_reply));
/* check if cluster idle entry is allowed */
if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
/* Cluster idle not allowed */
target = PSCI_LOCAL_STATE_RUN;
}
} else if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_POWERDN)) {
/* initialize the bpmp interface */
(void)tegra_bpmp_init();
/* Cluster power-down */
data[0] = (uint32_t)cpu;
data[1] = TEGRA_PM_CC7;
data[2] = TEGRA_PM_SC1;
ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
(void *)&data, (int)sizeof(data),
(void *)&bpmp_reply, (int)sizeof(bpmp_reply));
/* check if cluster power down is allowed */
if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
/* Cluster power down not allowed */
target = PSCI_LOCAL_STATE_RUN;
}
/* System Suspend */
if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
(target == PSTATE_ID_SOC_POWERDN))
return PSTATE_ID_SOC_POWERDN;
} else if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
(target == PSTATE_ID_SOC_POWERDN)) {
/* default state */
return PSCI_LOCAL_STATE_RUN;
/* System Suspend */
target = PSTATE_ID_SOC_POWERDN;
} else {
; /* do nothing */
}
return target;
}
int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
......@@ -120,27 +162,43 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2];
unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1];
unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0];
int ret = PSCI_E_SUCCESS;
if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
(stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
(stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
(stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
(stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
if (tegra_chipid_is_t210_b01()) {
/* Suspend se/se2 and pka1 */
if (tegra_se_suspend() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
/* Save tzram contents */
if (tegra_se_save_tzram() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
}
/* suspend the entire soc */
tegra_fc_soc_powerdn(mpidr);
/* enter system suspend */
if (ret == PSCI_E_SUCCESS) {
tegra_fc_soc_powerdn(mpidr);
}
} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_IDLE);
assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN);
/* Prepare for cluster idle */
tegra_fc_cluster_idle(mpidr);
} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_POWERDN) {
assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_POWERDN);
assert(stateid_afflvl0 == PSTATE_ID_CORE_POWERDN);
/* Prepare for cluster powerdn */
tegra_fc_cluster_powerdn(mpidr);
......@@ -151,23 +209,40 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
tegra_fc_cpu_powerdn(mpidr);
} else {
ERROR("%s: Unknown state id\n", __func__);
return PSCI_E_NOT_SUPPORTED;
ERROR("%s: Unknown state id (%d, %d, %d)\n", __func__,
stateid_afflvl2, stateid_afflvl1, stateid_afflvl0);
ret = PSCI_E_NOT_SUPPORTED;
}
return PSCI_E_SUCCESS;
return ret;
}
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
uint64_t val;
/* platform parameter passed by the previous bootloader */
if (plat_params->l2_ecc_parity_prot_dis != 1) {
/* Enable ECC Parity Protection for Cortex-A57 CPUs */
val = read_l2ctlr_el1();
val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
write_l2ctlr_el1(val);
}
/*
* Check if we are exiting from SOC_POWERDN.
*/
if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
PLAT_SYS_SUSPEND_STATE_ID) {
/*
* Security engine resume
*/
if (tegra_chipid_is_t210_b01()) {
tegra_se_resume();
}
/*
* Lock scratch registers which hold the CPU vectors
*/
......@@ -231,7 +306,7 @@ int tegra_soc_prepare_system_reset(void)
* for the PMC APB clock would not be changed due to system reset.
*/
mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
SCLK_BURST_POLICY_DEFAULT);
SCLK_BURST_POLICY_DEFAULT);
mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
/* Wait 1 ms to make sure clock source/device logic is stabilized. */
......
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <bpmp.h>
#include <cortex_a57.h>
#include <common/bl_common.h>
#include <drivers/console.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform.h>
#include <security_engine.h>
#include <tegra_def.h>
#include <tegra_platform.h>
#include <tegra_private.h>
/*******************************************************************************
* The Tegra power domain tree has a single system level power domain i.e. a
* single root node. The first entry in the power domain descriptor specifies
* the number of power domains at the highest power level.
*******************************************************************************
*/
const unsigned char tegra_power_domain_tree_desc[] = {
/* No of root nodes */
1,
/* No of clusters */
PLATFORM_CLUSTER_COUNT,
/* No of CPU cores - cluster0 */
PLATFORM_MAX_CPUS_PER_CLUSTER,
/* No of CPU cores - cluster1 */
PLATFORM_MAX_CPUS_PER_CLUSTER
};
/* sets of MMIO ranges setup */
#define MMIO_RANGE_0_ADDR 0x50000000
#define MMIO_RANGE_1_ADDR 0x60000000
......@@ -39,6 +26,8 @@ const unsigned char tegra_power_domain_tree_desc[] = {
* Table of regions to map using the MMU.
*/
static const mmap_region_t tegra_mmap[] = {
MAP_REGION_FLAT(TEGRA_IRAM_BASE, 0x40000, /* 256KB */
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
MT_DEVICE | MT_RW | MT_SECURE),
MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
......@@ -53,10 +42,43 @@ static const mmap_region_t tegra_mmap[] = {
******************************************************************************/
const mmap_region_t *plat_get_mmio_map(void)
{
/* Add the map region for security engine SE2 */
if (tegra_chipid_is_t210_b01()) {
mmap_add_region((uint64_t)TEGRA_SE2_BASE,
(uint64_t)TEGRA_SE2_BASE,
(uint64_t)TEGRA_SE2_RANGE_SIZE,
MT_DEVICE | MT_RW | MT_SECURE);
}
/* MMIO space */
return tegra_mmap;
}
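mmap_add_region() appends to the same region list that the static tegra_mmap table seeds, so chip-conditional windows like SE2 can be added before the MMU is enabled. A minimal sketch of adding another identity-mapped device window (TEGRA_DEV_BASE and TEGRA_DEV_SIZE are hypothetical placeholders):

/* Sketch: identity-map one more device window at runtime. */
mmap_add_region((unsigned long long)TEGRA_DEV_BASE,	/* PA */
	(uintptr_t)TEGRA_DEV_BASE,			/* VA, flat mapping */
	(size_t)TEGRA_DEV_SIZE,
	MT_DEVICE | MT_RW | MT_SECURE);

Each such call consumes one MAX_MMAP_REGIONS slot, which is why the makefile change at the end of this diff raises that limit from 8 to 10.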
/*******************************************************************************
* The Tegra power domain tree has a single system level power domain i.e. a
* single root node. The first entry in the power domain descriptor specifies
* the number of power domains at the highest power level.
*******************************************************************************
*/
const unsigned char tegra_power_domain_tree_desc[] = {
/* No of root nodes */
1,
/* No of clusters */
PLATFORM_CLUSTER_COUNT,
/* No of CPU cores - cluster0 */
PLATFORM_MAX_CPUS_PER_CLUSTER,
/* No of CPU cores - cluster1 */
PLATFORM_MAX_CPUS_PER_CLUSTER
};
/*******************************************************************************
* This function returns the Tegra default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
return tegra_power_domain_tree_desc;
}
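With PLATFORM_CLUSTER_COUNT = 2 and PLATFORM_MAX_CPUS_PER_CLUSTER = 4 the descriptor reads {1, 2, 4, 4}: one root node, two clusters, then one core count per cluster. The PSCI library has its own parser; the sketch below only illustrates how the layout is walked.

/* Sketch: derive the total core count from the descriptor above. */
static unsigned int tegra_count_cores(void)
{
	const unsigned char *desc = tegra_power_domain_tree_desc;
	unsigned int nclusters = desc[1];	/* desc[0] is the root count */
	unsigned int ncores = 0U;

	for (unsigned int i = 0U; i < nclusters; i++) {
		ncores += desc[2U + i];		/* per-cluster CPU counts */
	}

	return ncores;	/* 8 for the {1, 2, 4, 4} layout */
}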
/*******************************************************************************
* Handler to get the System Counter Frequency
******************************************************************************/
......@@ -93,6 +115,28 @@ uint32_t plat_get_console_from_id(int id)
return tegra210_uart_addresses[id];
}
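The lookup table itself is collapsed out of this hunk; it is presumably shaped as below, with entry 0 left invalid so the 1-based console id indexes directly (the exact contents are an assumption; the TEGRA_UART*_BASE macros come from tegra_def.h):

/* Sketch of the table consumed above; index 0 is not a valid console id. */
static const unsigned int tegra210_uart_addresses[] = {
	0,	/* id 0: invalid */
	TEGRA_UARTA_BASE,
	TEGRA_UARTB_BASE,
	TEGRA_UARTC_BASE,
	TEGRA_UARTD_BASE,
	TEGRA_UARTE_BASE,
};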
/*******************************************************************************
* Handler for early platform setup
******************************************************************************/
void plat_early_platform_setup(void)
{
const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
uint64_t val;
/* platform parameter passed by the previous bootloader */
if (plat_params->l2_ecc_parity_prot_dis != 1) {
/* Enable ECC Parity Protection for Cortex-A57 CPUs */
val = read_l2ctlr_el1();
val |= (uint64_t)CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
write_l2ctlr_el1(val);
}
/* Initialize security engine driver */
if (tegra_chipid_is_t210_b01()) {
tegra_se_init();
}
}
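The l2_ecc_parity_prot_dis guard is filled in by the previous bootloader; the producer side would look roughly like this (the BL2 snippet is an assumption, only the field name comes from this patch):

/* Sketch: BL2-side opt-out for the L2 ECC/parity enable above. */
static plat_params_from_bl2_t plat_params = {
	.l2_ecc_parity_prot_dis = 1,	/* 1 = skip enabling protection */
};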
/*******************************************************************************
* Initialize the GIC and SGIs
******************************************************************************/
......
......@@ -16,18 +16,22 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
PLATFORM_MAX_CPUS_PER_CLUSTER := 4
$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
MAX_XLAT_TABLES := 4
MAX_XLAT_TABLES := 10
$(eval $(call add_define,MAX_XLAT_TABLES))
MAX_MMAP_REGIONS := 8
MAX_MMAP_REGIONS := 10
$(eval $(call add_define,MAX_MMAP_REGIONS))
PLAT_INCLUDES += -I${SOC_DIR}/drivers/se
BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
${COMMON_DIR}/drivers/bpmp/bpmp.c \
${COMMON_DIR}/drivers/flowctrl/flowctrl.c \
${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \
${SOC_DIR}/plat_psci_handlers.c \
${SOC_DIR}/plat_setup.c \
${SOC_DIR}/drivers/se/security_engine.c \
${SOC_DIR}/plat_secondary.c
# Enable workarounds for selected Cortex-A57 errata.
......
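add_define turns each variable above into a -D<NAME>=<value> compiler flag, and the xlat library sizes its static pools from them, roughly as below (illustrative, not the library's verbatim code):

/* Sketch: how the raised limits become compile-time pool sizes. */
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
	__aligned(XLAT_TABLE_SIZE);
static mmap_region_t mmap_pool[MAX_MMAP_REGIONS + 1];	/* +1: terminator */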