Unverified Commit c40c88f8 authored by Antonio Niño Díaz, committed by GitHub

Merge pull request #1764 from vwadekar/tf2.0-tegra-downstream-rebase-1.7.19

Tf2.0 tegra downstream rebase 1.7.19
parents fbf35335 650d9c52
@@ -586,12 +586,12 @@
 /*******************************************************************************
  * SMMU Global Aux. Control Register
  ******************************************************************************/
-#define SMMU_CBn_ACTLR_CPRE_BIT (1U << 1)
+#define SMMU_CBn_ACTLR_CPRE_BIT (1ULL << 1U)
 /*******************************************************************************
  * SMMU configuration constants
  ******************************************************************************/
-#define ID1_PAGESIZE (1U << 31)
+#define ID1_PAGESIZE (1U << 31U)
 #define ID1_NUMPAGENDXB_SHIFT 28U
 #define ID1_NUMPAGENDXB_MASK 7U
 #define ID1_NUMS2CB_SHIFT 16U
@@ -705,5 +705,6 @@ typedef struct smmu_regs {
 void tegra_smmu_init(void);
 void tegra_smmu_save_context(uint64_t smmu_ctx_addr);
 smmu_regs_t *plat_get_smmu_ctx(void);
+uint32_t plat_get_num_smmu_devices(void);
 #endif /* SMMU_H */

+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef __PROFILER_H__
+#define __PROFILER_H__
+/*******************************************************************************
+ * Number of bytes of memory used by the profiler on Tegra
+ ******************************************************************************/
+#define PROFILER_SIZE_BYTES U(0x1000)
+void boot_profiler_init(uint64_t shmem_base, uint32_t tmr_base);
+void boot_profiler_add_record(const char *str);
+void boot_profiler_deinit(void);
+#endif /* __PROFILER_H__ */
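Note: the block above adds a new boot-profiler header. A minimal usage sketch of that interface, assuming the shared-memory base is handed over by BL2 and the timer base is the Tegra microsecond counter (both values below are placeholders, the wrapper function is hypothetical):

#include <stdint.h>
#include <profiler.h>

/* Illustrative only: record a couple of boot milestones. */
static void boot_profiler_example(uint64_t shmem_base, uint32_t tmr_base)
{
	/* set up the PROFILER_SIZE_BYTES record area and the timestamp source */
	boot_profiler_init(shmem_base, tmr_base);

	boot_profiler_add_record("[TF] BL31 setup start");
	boot_profiler_add_record("[TF] BL31 setup done");

	/* flush and tear down before handing off to the next stage */
	boot_profiler_deinit();
}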
@@ -83,6 +83,9 @@
  ******************************************************************************/
 #define TEGRA_MC_BASE U(0x70019000)
+/* Memory Controller Interrupt Status */
+#define MC_INTSTATUS 0x00U
 /* TZDRAM carveout configuration registers */
 #define MC_SECURITY_CFG0_0 U(0x70)
 #define MC_SECURITY_CFG1_0 U(0x74)
...
@@ -112,8 +112,13 @@
 #define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW U(0x15018)
 #define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW_RESET U(0x1100)
-#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (U(0x3) << 11)
-#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (U(0) << 11)
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (ULL(0x3) << 11)
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (ULL(0) << 11)
+/*******************************************************************************
+ * Tegra General Purpose Centralised DMA constants
+ ******************************************************************************/
+#define TEGRA_GPCDMA_BASE ULL(0x2610000)
 /*******************************************************************************
  * Tegra Memory Controller constants
@@ -124,7 +129,7 @@
 /* General Security Carveout register macros */
 #define MC_GSC_CONFIG_REGS_SIZE U(0x40)
 #define MC_GSC_LOCK_CFG_SETTINGS_BIT (U(1) << 1)
-#define MC_GSC_ENABLE_TZ_LOCK_BIT (U(1) << 0)
+#define MC_GSC_ENABLE_TZ_LOCK_BIT (ULL(1) << 0)
 #define MC_GSC_SIZE_RANGE_4KB_SHIFT U(27)
 #define MC_GSC_BASE_LO_SHIFT U(12)
 #define MC_GSC_BASE_LO_MASK U(0xFFFFF)
@@ -136,6 +141,10 @@
 #define MC_SECURITY_CFG1_0 U(0x74)
 #define MC_SECURITY_CFG3_0 U(0x9BC)
+#define MC_SECURITY_BOM_MASK (U(0xFFF) << 20)
+#define MC_SECURITY_SIZE_MB_MASK (U(0x1FFF) << 0)
+#define MC_SECURITY_BOM_HI_MASK (U(0x3) << 0)
 /* Video Memory carveout configuration registers */
 #define MC_VIDEO_PROTECT_BASE_HI U(0x978)
 #define MC_VIDEO_PROTECT_BASE_LO U(0x648)
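Note: the three MC_SECURITY_* masks added above look like field extractors for the TZDRAM carveout registers. A sketch of how they might be combined to recover the carveout base and size; the register semantics are inferred from the macro names, and the helper below is hypothetical:

#include <mmio.h>
#include <stdint.h>

/* Illustrative only: decode the TZDRAM carveout programmed into the MC. */
static void tegra_read_tzdram_carveout(uint64_t *base, uint64_t *size_in_bytes)
{
	uint32_t cfg0 = mmio_read_32(TEGRA_MC_BASE + MC_SECURITY_CFG0_0);
	uint32_t cfg1 = mmio_read_32(TEGRA_MC_BASE + MC_SECURITY_CFG1_0);
	uint32_t cfg3 = mmio_read_32(TEGRA_MC_BASE + MC_SECURITY_CFG3_0);

	/* CFG3 carries the high bits of the base, CFG0 the MB-aligned low bits */
	*base = ((uint64_t)(cfg3 & MC_SECURITY_BOM_HI_MASK) << 32) |
		(uint64_t)(cfg0 & MC_SECURITY_BOM_MASK);

	/* CFG1 carries the carveout size in megabytes */
	*size_in_bytes = (uint64_t)(cfg1 & MC_SECURITY_SIZE_MB_MASK) << 20;
}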
@@ -198,6 +207,8 @@
 #define TEGRA_CAR_RESET_BASE U(0x05000000)
 #define TEGRA_GPU_RESET_REG_OFFSET U(0x30)
 #define GPU_RESET_BIT (U(1) << 0)
+#define TEGRA_GPCDMA_RST_SET_REG_OFFSET U(0x6A0004)
+#define TEGRA_GPCDMA_RST_CLR_REG_OFFSET U(0x6A0008)
 /*******************************************************************************
  * Tegra micro-seconds timer constants
...
@@ -89,6 +89,16 @@
 #define TEGRA_RST_DEV_CLR_V U(0x434)
 #define TEGRA_CLK_ENB_V U(0x440)
+/* SE Clock Offsets */
+#define TEGRA_RST_DEVICES_V 0x358UL
+#define SE_RESET_BIT (0x1UL << 31)
+#define TEGRA_RST_DEVICES_W 0x35CUL
+#define ENTROPY_CLK_ENB_BIT (0x1UL << 21)
+#define TEGRA_CLK_OUT_ENB_V 0x360UL
+#define SE_CLK_ENB_BIT (0x1UL << 31)
+#define TEGRA_CLK_OUT_ENB_W 0x364UL
+#define ENTROPY_RESET_BIT (0x1UL << 21)
 /*******************************************************************************
  * Tegra Flow Controller constants
  ******************************************************************************/
@@ -124,6 +134,16 @@
 #define TEGRA_UARTD_BASE U(0x70006300)
 #define TEGRA_UARTE_BASE U(0x70006400)
+/*******************************************************************************
+ * Tegra Fuse Controller related constants
+ ******************************************************************************/
+#define TEGRA_FUSE_BASE 0x7000F800UL
+#define FUSE_BOOT_SECURITY_INFO 0x268UL
+#define FUSE_ATOMIC_SAVE_CARVEOUT_EN (0x1U << 7)
+#define FUSE_JTAG_SECUREID_VALID (0x104UL)
+#define ECID_VALID (0x1UL)
 /*******************************************************************************
  * Tegra Power Mgmt Controller constants
  ******************************************************************************/
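Note: a small, purely illustrative sketch of how the fuse constants introduced above could be consulted; which code actually reads them is not shown in this diff, and the helper names are hypothetical:

#include <mmio.h>
#include <stdbool.h>
#include <stdint.h>

static bool tegra_atomic_save_carveout_enabled(void)
{
	uint32_t val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_BOOT_SECURITY_INFO);

	return (val & FUSE_ATOMIC_SAVE_CARVEOUT_EN) != 0U;
}

static bool tegra_ecid_valid(void)
{
	uint32_t val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);

	return (val & ECID_VALID) != 0U;
}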
@@ -143,6 +163,9 @@
  ******************************************************************************/
 #define TEGRA_MC_BASE U(0x70019000)
+/* Memory Controller Interrupt Status */
+#define MC_INTSTATUS 0x00U
 /* TZDRAM carveout configuration registers */
 #define MC_SECURITY_CFG0_0 U(0x70)
 #define MC_SECURITY_CFG1_0 U(0x74)
@@ -153,6 +176,10 @@
 #define MC_VIDEO_PROTECT_BASE_LO U(0x648)
 #define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
+/* SMMU configuration registers*/
+#define MC_SMMU_PPCS_ASID_0 0x270U
+#define PPCS_SMMU_ENABLE (0x1U << 31)
 /*******************************************************************************
  * Tegra SE constants
  ******************************************************************************/
@@ -168,4 +195,10 @@
 #define TEGRA_TZRAM_BASE U(0x7C010000)
 #define TEGRA_TZRAM_SIZE U(0x10000)
+/*******************************************************************************
+ * Tegra TZRAM carveout constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_CARVEOUT_BASE U(0x7C04C000)
+#define TEGRA_TZRAM_CARVEOUT_SIZE U(0x4000)
 #endif /* TEGRA_DEF_H */
@@ -46,7 +46,6 @@ bool tegra_chipid_is_t186(void);
 bool tegra_chipid_is_t210(void);
 bool tegra_chipid_is_t210_b01(void);
 /*
  * Tegra platform identifiers
  */
...
@@ -22,6 +22,16 @@
 #define TEGRA_DRAM_BASE ULL(0x80000000)
 #define TEGRA_DRAM_END ULL(0x27FFFFFFF)
+/*******************************************************************************
+ * Implementation defined ACTLR_EL1 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL1_PMSTATE_MASK (ULL(0xF) << 0)
+/*******************************************************************************
+ * Implementation defined ACTLR_EL2 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL2_PMSTATE_MASK (ULL(0xF) << 0)
 /*******************************************************************************
  * Struct for parameters received from BL2
  ******************************************************************************/
@@ -31,9 +41,11 @@ typedef struct plat_params_from_bl2 {
 	/* TZ memory base */
 	uint64_t tzdram_base;
 	/* UART port ID */
-	int uart_id;
+	int32_t uart_id;
 	/* L2 ECC parity protection disable flag */
-	int l2_ecc_parity_prot_dis;
+	int32_t l2_ecc_parity_prot_dis;
+	/* SHMEM base address for storing the boot logs */
+	uint64_t boot_profiler_shmem_base;
 } plat_params_from_bl2_t;
 /*******************************************************************************
@@ -82,7 +94,30 @@ extern uint8_t tegra_fake_system_suspend;
 void tegra_pm_system_suspend_entry(void);
 void tegra_pm_system_suspend_exit(void);
-int tegra_system_suspended(void);
+int32_t tegra_system_suspended(void);
+int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_on(u_register_t mpidr);
+int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state);
+int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state);
+int32_t tegra_soc_prepare_system_reset(void);
+__dead2 void tegra_soc_prepare_system_off(void);
+plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
+		const plat_local_state_t *states,
+		uint32_t ncpu);
+void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state);
+void tegra_cpu_standby(plat_local_state_t cpu_state);
+int32_t tegra_pwr_domain_on(u_register_t mpidr);
+void tegra_pwr_domain_off(const psci_power_state_t *target_state);
+void tegra_pwr_domain_suspend(const psci_power_state_t *target_state);
+void __dead2 tegra_pwr_domain_power_down_wfi(const psci_power_state_t *target_state);
+void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state);
+void tegra_pwr_domain_suspend_finish(const psci_power_state_t *target_state);
+__dead2 void tegra_system_off(void);
+__dead2 void tegra_system_reset(void);
+int32_t tegra_validate_power_state(uint32_t power_state,
+		psci_power_state_t *req_state);
+int32_t tegra_validate_ns_entrypoint(uintptr_t entrypoint);
 /* Declarations for tegraXXX_pm.c */
 int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl);
@@ -90,7 +125,7 @@ int tegra_prepare_cpu_on_finish(unsigned long mpidr);
 /* Declarations for tegra_bl31_setup.c */
 plat_params_from_bl2_t *bl31_get_plat_params(void);
-int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
+int32_t bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
 void plat_early_platform_setup(void);
 /* Declarations for tegra_delay_timer.c */
...
@@ -12,6 +12,8 @@ $(eval $(call add_define,CRASH_REPORTING))
 # enable assert() for release/debug builds
 ENABLE_ASSERTIONS := 1
+PLAT_LOG_LEVEL_ASSERT := 40
+$(eval $(call add_define,PLAT_LOG_LEVEL_ASSERT))
 # enable dynamic memory mapping
 PLAT_XLAT_TABLES_DYNAMIC := 1
@@ -29,6 +31,9 @@ USE_COHERENT_MEM := 0
 # do not enable SVE
 ENABLE_SVE_FOR_NS := 0
+# enable D-cache early during CPU warmboot
+WARMBOOT_ENABLE_DCACHE_EARLY := 1
 include plat/nvidia/tegra/common/tegra_common.mk
 include ${SOC_DIR}/platform_${TARGET_SOC}.mk
...
@@ -98,19 +98,24 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
 int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
 {
+	uint64_t val;
 	tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
 	/* Disable DCO operations */
 	denver_disable_dco();
 	/* Power down the CPU */
-	write_actlr_el1(DENVER_CPU_STATE_POWER_DOWN);
+	val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
+	write_actlr_el1(val | DENVER_CPU_STATE_POWER_DOWN);
 	return PSCI_E_SUCCESS;
 }
 int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 {
+	uint64_t val;
 #if ENABLE_ASSERTIONS
 	int cpu = read_mpidr() & MPIDR_CPU_MASK;
@@ -128,7 +133,8 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 	denver_disable_dco();
 	/* Program the suspend state ID */
-	write_actlr_el1(target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]);
+	val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
+	write_actlr_el1(val | target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]);
 	return PSCI_E_SUCCESS;
 }
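Note: the read-modify-write introduced above (clear the ACTLR_EL1.PMSTATE field, then OR in the new state) also appears in the Tegra186 hunks further down. Captured as a standalone sketch with a hypothetical helper name:

#include <arch_helpers.h>	/* read_actlr_el1()/write_actlr_el1() */

/* Hypothetical helper: program only the PMSTATE field of ACTLR_EL1. */
static inline void tegra_set_actlr_pmstate(uint64_t state)
{
	/* preserve all other implementation-defined bits of the register */
	uint64_t val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;

	write_actlr_el1(val | (state & ACTLR_EL1_PMSTATE_MASK));
}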
...
@@ -151,7 +151,7 @@ int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
 		/* Enter the cstate, to be woken up after wake_time (TSC ticks) */
 		ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
-				TEGRA_ARI_ENTER_CSTATE, state, wake_time);
+				(uint32_t)TEGRA_ARI_ENTER_CSTATE, state, wake_time);
 	}
 	return ret;
@@ -191,7 +191,7 @@ int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccp
 	}
 	/* set the updated cstate info */
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO,
+	return ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_UPDATE_CSTATE_INFO,
 			(uint32_t)val, wake_mask);
 }
@@ -208,8 +208,8 @@ int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t tim
 		ari_clobber_response(ari_base);
 		/* update crossover threshold time */
-		ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CROSSOVER,
-				type, time);
+		ret = ari_request_wait(ari_base, 0U,
+				(uint32_t)TEGRA_ARI_UPDATE_CROSSOVER, type, time);
 	}
 	return ret;
@@ -227,7 +227,8 @@ uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
-	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_CSTATE_STATS, state, 0U);
+	ret = ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_CSTATE_STATS, state, 0U);
 	if (ret != 0) {
 		result = EINVAL;
 	} else {
@@ -243,8 +244,8 @@ int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats
 	ari_clobber_response(ari_base);
 	/* write the cstate stats */
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_WRITE_CSTATE_STATS, state,
-			stats);
+	return ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_WRITE_CSTATE_STATS,
+			state, stats);
 }
 uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
@@ -261,7 +262,7 @@ uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
 		local_data = 0U;
 	}
-	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC, cmd, local_data);
+	ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_MISC, cmd, local_data);
 	if (ret != 0) {
 		resp = (uint64_t)ret;
 	} else {
@@ -281,8 +282,8 @@ int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
-	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7U,
-			wake_time);
+	ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_IS_CCX_ALLOWED,
+			state & 0x7U, wake_time);
 	if (ret != 0) {
 		ERROR("%s: failed (%d)\n", __func__, ret);
 		result = 0U;
@@ -307,8 +308,8 @@ int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
-	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_SC7_ALLOWED, state,
-			wake_time);
+	ret = ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_IS_SC7_ALLOWED, state, wake_time);
 	if (ret != 0) {
 		ERROR("%s: failed (%d)\n", __func__, ret);
 		result = 0;
@@ -346,7 +347,8 @@ int32_t ari_online_core(uint32_t ari_base, uint32_t core)
 	} else {
 		/* clean the previous response state */
 		ari_clobber_response(ari_base);
-		ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_ONLINE_CORE, core, 0U);
+		ret = ari_request_wait(ari_base, 0U,
+				(uint32_t)TEGRA_ARI_ONLINE_CORE, core, 0U);
 	}
 }
@@ -374,7 +376,8 @@ int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t en
 		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
 		((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_CC3_CTRL, val, 0U);
+	return ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_CC3_CTRL, val, 0U);
 }
 int32_t ari_reset_vector_update(uint32_t ari_base)
@@ -386,7 +389,8 @@ int32_t ari_reset_vector_update(uint32_t ari_base)
 	 * Need to program the CPU reset vector one time during cold boot
 	 * and SC7 exit
 	 */
-	(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
+	(void)ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
 	return 0;
 }
@@ -396,8 +400,8 @@ int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
-			0U, 0U);
+	return ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS, 0U, 0U);
 }
 int32_t ari_roc_flush_cache(uint32_t ari_base)
@@ -405,8 +409,8 @@ int32_t ari_roc_flush_cache(uint32_t ari_base)
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
-			0U, 0U);
+	return ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_ROC_FLUSH_CACHE_ONLY, 0U, 0U);
 }
 int32_t ari_roc_clean_cache(uint32_t ari_base)
@@ -414,8 +418,8 @@ int32_t ari_roc_clean_cache(uint32_t ari_base)
 	/* clean the previous response state */
 	ari_clobber_response(ari_base);
-	return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
-			0U, 0U);
+	return ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_ROC_CLEAN_CACHE_ONLY, 0U, 0U);
 }
 uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
@@ -432,7 +436,7 @@ uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
 	ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
 	ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);
-	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MCA,
+	ret = ari_request_wait(ari_base, 0U, (uint32_t)TEGRA_ARI_MCA,
			(uint32_t)mca_arg_data,
			(uint32_t)(mca_arg_data >> 32U));
 	if (ret == 0) {
@@ -473,7 +477,8 @@ int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
	 * the ID, from the MC registers and update the internal GSC registers
	 * of the CCPLEX.
	 */
-		(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
+		(void)ari_request_wait(ari_base, 0U,
+				(uint32_t)TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
 	}
 	return ret;
@@ -487,7 +492,8 @@ void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
	/*
	 * The MCE will shutdown or restart the entire system
	 */
-	(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
+	(void)ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
 }
 int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
@@ -514,8 +520,8 @@ int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
 	val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
			(uint32_t)*data : 0U;
-	ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_PERFMON, val,
-			(uint32_t)req);
+	ret = ari_request_wait(ari_base, 0U,
+			(uint32_t)TEGRA_ARI_PERFMON, val, (uint32_t)req);
 	if (ret != 0) {
 		result = ret;
 	} else {
@@ -552,6 +558,7 @@ void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
 	} else {
 		/* clean the previous response state */
 		ari_clobber_response(ari_base);
-		(void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, index, value);
+		(void)ari_request_wait(ari_base, 0U,
+				(uint32_t)TEGRA_ARI_MISC_CCPLEX, index, value);
 	}
 }
@@ -170,12 +170,12 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 	cpu_ari_base = mce_get_curr_cpu_ari_base();
 	switch (cmd) {
-	case MCE_CMD_ENTER_CSTATE:
+	case (uint64_t)MCE_CMD_ENTER_CSTATE:
 		ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
 		break;
-	case MCE_CMD_UPDATE_CSTATE_INFO:
+	case (uint64_t)MCE_CMD_UPDATE_CSTATE_INFO:
		/*
		 * get the parameters required for the update cstate info
		 * command
@@ -194,12 +194,12 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_UPDATE_CROSSOVER_TIME:
+	case (uint64_t)MCE_CMD_UPDATE_CROSSOVER_TIME:
 		ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
 		break;
-	case MCE_CMD_READ_CSTATE_STATS:
+	case (uint64_t)MCE_CMD_READ_CSTATE_STATS:
 		ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
 		/* update context to return cstate stats value */
@@ -208,12 +208,12 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_WRITE_CSTATE_STATS:
+	case (uint64_t)MCE_CMD_WRITE_CSTATE_STATS:
 		ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
 		break;
-	case MCE_CMD_IS_CCX_ALLOWED:
+	case (uint64_t)MCE_CMD_IS_CCX_ALLOWED:
 		ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
 		/* update context to return CCx status value */
@@ -221,7 +221,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_IS_SC7_ALLOWED:
+	case (uint64_t)MCE_CMD_IS_SC7_ALLOWED:
 		ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
 		/* update context to return SC7 status value */
@@ -230,17 +230,17 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_ONLINE_CORE:
+	case (uint64_t)MCE_CMD_ONLINE_CORE:
 		ret = ops->online_core(cpu_ari_base, arg0);
 		break;
-	case MCE_CMD_CC3_CTRL:
+	case (uint64_t)MCE_CMD_CC3_CTRL:
 		ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
 		break;
-	case MCE_CMD_ECHO_DATA:
+	case (uint64_t)MCE_CMD_ECHO_DATA:
 		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
 				arg0);
@@ -252,7 +252,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_READ_VERSIONS:
+	case (uint64_t)MCE_CMD_READ_VERSIONS:
 		ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
 				arg0);
@@ -265,7 +265,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_ENUM_FEATURES:
+	case (uint64_t)MCE_CMD_ENUM_FEATURES:
 		ret64 = ops->call_enum_misc(cpu_ari_base,
 				TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
@@ -274,22 +274,22 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+	case (uint64_t)MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
 		ret = ops->roc_flush_cache_trbits(cpu_ari_base);
 		break;
-	case MCE_CMD_ROC_FLUSH_CACHE:
+	case (uint64_t)MCE_CMD_ROC_FLUSH_CACHE:
 		ret = ops->roc_flush_cache(cpu_ari_base);
 		break;
-	case MCE_CMD_ROC_CLEAN_CACHE:
+	case (uint64_t)MCE_CMD_ROC_CLEAN_CACHE:
 		ret = ops->roc_clean_cache(cpu_ari_base);
 		break;
-	case MCE_CMD_ENUM_READ_MCA:
+	case (uint64_t)MCE_CMD_ENUM_READ_MCA:
 		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
 		/* update context to return MCA data/error */
@@ -299,7 +299,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
-	case MCE_CMD_ENUM_WRITE_MCA:
+	case (uint64_t)MCE_CMD_ENUM_WRITE_MCA:
 		ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
 		/* update context to return MCA error */
@@ -309,7 +309,7 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
 #if ENABLE_CHIP_VERIFICATION_HARNESS
-	case MCE_CMD_ENABLE_LATIC:
+	case (uint64_t)MCE_CMD_ENABLE_LATIC:
		/*
		 * This call is not for production use. The constant value,
		 * 0xFFFF0000, is specific to allowing for enabling LATIC on
@@ -327,14 +327,14 @@ int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
 		break;
 #endif
-	case MCE_CMD_UNCORE_PERFMON_REQ:
+	case (uint64_t)MCE_CMD_UNCORE_PERFMON_REQ:
 		ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
 		/* update context to return data */
 		write_ctx_reg(gp_regs, CTX_GPREG_X1, (arg1));
 		break;
-	case MCE_CMD_MISC_CCPLEX:
+	case (uint64_t)MCE_CMD_MISC_CCPLEX:
 		ops->misc_ccplex(cpu_ari_base, arg0, arg1);
 		break;
...
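Note: the common thread in the two files above is that enum constants (TEGRA_ARI_* request IDs, MCE_CMD_* case labels) are now cast explicitly to the unsigned type of the parameter or of the switch controlling expression, presumably to satisfy MISRA-style essential-type checks. In miniature, with purely illustrative names:

#include <stdint.h>

/* Illustrative command enumeration, standing in for the MCE_CMD_* values. */
typedef enum { EXAMPLE_CMD_ENTER_CSTATE, EXAMPLE_CMD_ONLINE_CORE } example_cmd_t;

static int32_t example_handler(uint64_t cmd)
{
	int32_t ret;

	switch (cmd) {
	/* each enumerator is cast to the controlling expression's type */
	case (uint64_t)EXAMPLE_CMD_ENTER_CSTATE:
		ret = 0;
		break;
	case (uint64_t)EXAMPLE_CMD_ONLINE_CORE:
		ret = 1;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}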
@@ -14,10 +14,12 @@
 #include <mce_private.h>
 #include <t18x_ari.h>
+#include <tegra_private.h>
 int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
 {
 	int32_t ret = 0;
+	uint64_t val = 0ULL;
 	(void)ari_base;
@@ -28,10 +30,11 @@ int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
 		ret = EINVAL;
 	} else {
 		/* time (TSC ticks) until the core is expected to get a wake event */
-		nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+		nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
 		/* set the core cstate */
-		write_actlr_el1(state);
+		val = read_actlr_el1() & ~ACTLR_EL1_PMSTATE_MASK;
+		write_actlr_el1(val | (uint64_t)state);
 	}
 	return ret;
@@ -78,7 +81,7 @@ int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccp
 	val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
 	/* set the updated cstate info */
-	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+	nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
 	return 0;
 }
@@ -189,7 +192,7 @@ int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time
			((uint64_t)state & MCE_SC7_ALLOWED_MASK);
 	/* issue command to check if SC7 is allowed */
-	nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+	nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
 	/* 1 = SC7 allowed, 0 = SC7 not allowed */
 	ret = (nvg_get_result() != 0ULL) ? 1 : 0;
@@ -219,7 +222,7 @@ int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
 		ret = EINVAL;
 	} else {
 		/* get a core online */
-		nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
+		nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_ONLINE_CORE,
 				((uint64_t)core & MCE_CORE_ID_MASK));
 	}
 }
@@ -247,7 +250,7 @@ int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t en
 		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
 		((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
-	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
+	nvg_set_request_data((uint64_t)TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
 	return 0;
 }
@@ -206,11 +206,11 @@ const static mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = {
  ******************************************************************************/
 static tegra_mc_settings_t tegra186_mc_settings = {
 	.streamid_override_cfg = tegra186_streamid_override_regs,
-	.num_streamid_override_cfgs = ARRAY_SIZE(tegra186_streamid_override_regs),
+	.num_streamid_override_cfgs = (uint32_t)ARRAY_SIZE(tegra186_streamid_override_regs),
 	.streamid_security_cfg = tegra186_streamid_sec_cfgs,
-	.num_streamid_security_cfgs = ARRAY_SIZE(tegra186_streamid_sec_cfgs),
+	.num_streamid_security_cfgs = (uint32_t)ARRAY_SIZE(tegra186_streamid_sec_cfgs),
 	.txn_override_cfg = tegra186_txn_override_cfgs,
-	.num_txn_override_cfgs = ARRAY_SIZE(tegra186_txn_override_cfgs)
+	.num_txn_override_cfgs = (uint32_t)ARRAY_SIZE(tegra186_txn_override_cfgs)
 };
 /*******************************************************************************
...
@@ -20,6 +20,7 @@
 #include <mce.h>
 #include <smmu.h>
+#include <stdbool.h>
 #include <t18x_ari.h>
 #include <tegra_private.h>
@@ -27,12 +28,9 @@ extern void memcpy16(void *dest, const void *src, unsigned int length);
 extern void prepare_cpu_pwr_dwn(void);
 extern void tegra186_cpu_reset_handler(void);
-extern uint32_t __tegra186_cpu_reset_handler_end,
+extern uint64_t __tegra186_cpu_reset_handler_end,
 		__tegra186_smmu_context;
-/* TZDRAM offset for saving SMMU context */
-#define TEGRA186_SMMU_CTX_OFFSET 16UL
 /* state id mask */
 #define TEGRA186_STATE_ID_MASK 0xFU
 /* constants to get power state's wake time */
@@ -111,7 +109,7 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 		/* Enter CPU idle/powerdown */
 		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
-			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
+			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
 		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
 			tegra_percpu_data[cpu].wake_time, 0U);
@@ -132,12 +130,12 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 		/* save SMMU context to TZDRAM */
 		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
-			(uintptr_t)tegra186_cpu_reset_handler);
+			(uintptr_t)&tegra186_cpu_reset_handler);
 		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
 		/* Prepare for system suspend */
-		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
-		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
+		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
+		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
 		cstate_info.system_state_force = 1;
 		cstate_info.update_wake_mask = 1;
 		mce_update_cstate_info(&cstate_info);
@@ -145,14 +143,14 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 		do {
 			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
-					TEGRA_ARI_CORE_C7,
+					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
 		} while (val == 0U);
 		/* Instruct the MCE to enter system suspend state */
 		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
-			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
+			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
 	} else {
 		; /* do nothing */
 	}
@@ -161,32 +159,41 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
 }
 /*******************************************************************************
- * Platform handler to calculate the proper target power level at the
- * specified affinity level
+ * Helper function to check if this is the last ON CPU in the cluster
  ******************************************************************************/
-plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
-					const plat_local_state_t *states,
-					uint32_t ncpu)
+static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
+					uint32_t ncpu)
 {
-	plat_local_state_t target = *states;
-	uint32_t pos = 0;
-	plat_local_state_t result = PSCI_LOCAL_STATE_RUN;
-	uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu;
-	int32_t ret, cluster_powerdn = 1;
-	uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
-	mce_cstate_info_t cstate_info = { 0 };
+	plat_local_state_t target;
+	bool last_on_cpu = true;
+	uint32_t num_cpus = ncpu, pos = 0;
+	do {
+		target = states[pos];
+		if (target != PLAT_MAX_OFF_STATE) {
+			last_on_cpu = false;
+		}
+		--num_cpus;
+		pos++;
+	} while (num_cpus != 0U);
+	return last_on_cpu;
+}
-	/* get the power state at this level */
-	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
-		target = states[core_pos];
-	}
-	if (lvl == (uint32_t)MPIDR_AFFLVL2) {
-		target = states[cpu];
-	}
-	/* CPU suspend */
-	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {
+/*******************************************************************************
+ * Helper function to get target power state for the cluster
+ ******************************************************************************/
+static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
+					uint32_t ncpu)
+{
+	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
+	uint32_t cpu = plat_my_core_pos();
+	int32_t ret;
+	plat_local_state_t target = states[core_pos];
+	mce_cstate_info_t cstate_info = { 0 };
+	/* CPU suspend */
+	if (target == PSTATE_ID_CORE_POWERDN) {
 		/* Program default wake mask */
 		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
 		cstate_info.update_wake_mask = 1;
@@ -194,41 +201,30 @@ plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
 		/* Check if CCx state is allowed. */
 		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
-				TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time,
+				(uint64_t)TEGRA_ARI_CORE_C7,
+				tegra_percpu_data[cpu].wake_time,
				0U);
-		if (ret != 0) {
-			result = PSTATE_ID_CORE_POWERDN;
+		if (ret == 0) {
+			target = PSCI_LOCAL_STATE_RUN;
 		}
 	}
 	/* CPU off */
-	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {
-		/* find out the number of ON cpus in the cluster */
-		do {
-			target = states[pos];
-			if (target != PLAT_MAX_OFF_STATE) {
-				cluster_powerdn = 0;
-			}
-			--num_cpu;
-			pos++;
-		} while (num_cpu != 0U);
+	if (target == PLAT_MAX_OFF_STATE) {
 		/* Enable cluster powerdn from last CPU in the cluster */
-		if (cluster_powerdn != 0) {
+		if (tegra_last_cpu_in_cluster(states, ncpu)) {
 			/* Enable CC7 state and turn off wake mask */
-			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
 			cstate_info.update_wake_mask = 1;
 			mce_update_cstate_info(&cstate_info);
 			/* Check if CCx state is allowed. */
 			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
-					TEGRA_ARI_CORE_C7,
+					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
-			if (ret != 0) {
-				result = PSTATE_ID_CORE_POWERDN;
+			if (ret == 0) {
+				target = PSCI_LOCAL_STATE_RUN;
 			}
 		} else {
@@ -236,17 +232,37 @@ plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
 			/* Turn off wake_mask */
 			cstate_info.update_wake_mask = 1;
 			mce_update_cstate_info(&cstate_info);
+			target = PSCI_LOCAL_STATE_RUN;
 		}
 	}
+	return target;
+}
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
+					const plat_local_state_t *states,
+					uint32_t ncpu)
+{
+	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
+	uint32_t cpu = plat_my_core_pos();
 	/* System Suspend */
-	if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) &&
-	    (target == PSTATE_ID_SOC_POWERDN)) {
-		result = PSTATE_ID_SOC_POWERDN;
+	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
+	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
+		target = PSTATE_ID_SOC_POWERDN;
+	}
+	/* CPU off, CPU suspend */
+	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
+		target = tegra_get_afflvl1_pwr_state(states, ncpu);
 	}
-	/* default state */
-	return result;
+	/* target cluster/system state */
+	return target;
 }
 int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
@@ -276,12 +292,12 @@ int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_sta
 int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
 {
-	uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
-	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
-			(uint64_t)MPIDR_AFFINITY_BITS;
 	int32_t ret = PSCI_E_SUCCESS;
+	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
+	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
+			MPIDR_AFFINITY_BITS;
-	if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {
+	if (target_cluster > MPIDR_AFFLVL1) {
 		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
 		ret = PSCI_E_NOT_PRESENT;
@@ -304,14 +320,13 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
 	uint64_t impl, val;
 	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();
-	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
-	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
-	    (impl != (uint64_t)DENVER_IMPL)) {
+	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {
 		val = read_l2ctlr_el1();
 		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
@@ -327,7 +342,7 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
	 */
 	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {
-		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
+		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
 		cstate_info.update_wake_mask = 1;
 		mce_update_cstate_info(&cstate_info);
 	}
@@ -354,8 +369,8 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
	 * and SC7 for SC7 entry which may not be requested by
	 * non-secure SW which controls idle states.
	 */
-		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
-		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
+		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
+		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
 		cstate_info.update_wake_mask = 1;
 		mce_update_cstate_info(&cstate_info);
 	}
@@ -375,8 +390,8 @@ int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
 	}
 	/* Turn off CPU */
-	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
-			MCE_CORE_SLEEP_TIME_INFINITE, 0U);
+	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
+			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
 	return PSCI_E_SUCCESS;
 }
@@ -384,7 +399,7 @@ int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
 __dead2 void tegra_soc_prepare_system_off(void)
 {
 	/* power off the entire system */
-	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
+	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
 	wfi();
@@ -396,7 +411,7 @@ __dead2 void tegra_soc_prepare_system_off(void)
 int32_t tegra_soc_prepare_system_reset(void)
 {
-	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
+	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
 	return PSCI_E_SUCCESS;
 }
@@ -49,11 +49,10 @@ void plat_secondary_setup(void)
 		cpu_reset_handler_base = params_from_bl2->tzdram_base;
 		memcpy16((void *)((uintptr_t)cpu_reset_handler_base),
			(void *)(uintptr_t)tegra186_cpu_reset_handler,
-			(uintptr_t)&__tegra186_cpu_reset_handler_end -
-			(uintptr_t)tegra186_cpu_reset_handler);
+			(uintptr_t)&__tegra186_cpu_reset_handler_end -
+			(uintptr_t)&tegra186_cpu_reset_handler);
 	} else {
-		cpu_reset_handler_base = (uintptr_t)tegra_secure_entrypoint;
+		cpu_reset_handler_base = (uintptr_t)&tegra_secure_entrypoint;
 	}
 	addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
...
@@ -23,7 +23,7 @@
 /*******************************************************************************
  * Offset to read the ref_clk counter value
  ******************************************************************************/
-#define REF_CLK_OFFSET 4
+#define REF_CLK_OFFSET 4ULL
 /*******************************************************************************
  * Tegra186 SiP SMCs
@@ -35,7 +35,7 @@
 #define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS 0xC2FFFF03
 #define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS 0xC2FFFF04
 #define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED 0xC2FFFF05
 #define TEGRA_SIP_MCE_CMD_ONLINE_CORE 0xC2FFFF06
 #define TEGRA_SIP_MCE_CMD_CC3_CTRL 0xC2FFFF07
 #define TEGRA_SIP_MCE_CMD_ECHO_DATA 0xC2FFFF08
 #define TEGRA_SIP_MCE_CMD_READ_VERSIONS 0xC2FFFF09
@@ -52,7 +52,7 @@
 /*******************************************************************************
  * This function is responsible for handling all T186 SiP calls
  ******************************************************************************/
-int plat_sip_handler(uint32_t smc_fid,
+int32_t plat_sip_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
@@ -61,24 +61,30 @@ int plat_sip_handler(uint32_t smc_fid,
		void *handle,
		uint64_t flags)
 {
-	int mce_ret;
-	int impl, cpu;
+	int32_t mce_ret, ret = 0;
+	uint32_t impl, cpu;
 	uint32_t base, core_clk_ctr, ref_clk_ctr;
+	uint32_t local_smc_fid = smc_fid;
+	uint64_t local_x1 = x1, local_x2 = x2, local_x3 = x3;
+	(void)x4;
+	(void)cookie;
+	(void)flags;
 	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
 		/* 32-bit function, clear top parameter bits */
-		x1 = (uint32_t)x1;
-		x2 = (uint32_t)x2;
-		x3 = (uint32_t)x3;
+		local_x1 = (uint32_t)x1;
+		local_x2 = (uint32_t)x2;
+		local_x3 = (uint32_t)x3;
 	}
	/*
	 * Convert SMC FID to SMC64, to support SMC32/SMC64 configurations
	 */
-	smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
+	local_smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
-	switch (smc_fid) {
+	switch (local_smc_fid) {
	/*
	 * Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 -
	 * 0x82FFFFFF SiP SMC space
@@ -103,14 +109,13 @@ int plat_sip_handler(uint32_t smc_fid,
 	case TEGRA_SIP_MCE_CMD_MISC_CCPLEX:
 		/* clean up the high bits */
-		smc_fid &= MCE_CMD_MASK;
+		local_smc_fid &= MCE_CMD_MASK;
 		/* execute the command and store the result */
-		mce_ret = mce_command_handler(smc_fid, x1, x2, x3);
-		write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0,
-				(uint64_t)mce_ret);
-		return 0;
+		mce_ret = mce_command_handler(local_smc_fid, local_x1, local_x2, local_x3);
+		write_ctx_reg(get_gpregs_ctx(handle),
+				CTX_GPREG_X0, (uint64_t)(mce_ret));
+		break;
	/*
	 * This function ID reads the Activity monitor's core/ref clock
@@ -125,28 +130,30 @@ int plat_sip_handler(uint32_t smc_fid,
 		impl = ((uint32_t)x2 >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
 		/* sanity check target CPU number */
-		if (cpu > PLATFORM_MAX_CPUS_PER_CLUSTER)
-			return -EINVAL;
+		if (cpu > (uint32_t)PLATFORM_MAX_CPUS_PER_CLUSTER) {
+			ret = -EINVAL;
+		} else {
+			/* get the base address for the current CPU */
+			base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
+				TEGRA_ARM_ACTMON_CTR_BASE;
+			/* read the clock counter values */
+			core_clk_ctr = mmio_read_32(base + (8ULL * cpu));
+			ref_clk_ctr = mmio_read_32(base + (8ULL * cpu) + REF_CLK_OFFSET);
+			/* return the counter values as two different parameters */
+			write_ctx_reg(get_gpregs_ctx(handle),
+				CTX_GPREG_X1, (core_clk_ctr));
+			write_ctx_reg(get_gpregs_ctx(handle),
+				CTX_GPREG_X2, (ref_clk_ctr));
+		}
+		break;
-		/* get the base address for the current CPU */
-		base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
-			TEGRA_ARM_ACTMON_CTR_BASE;
-		/* read the clock counter values */
-		core_clk_ctr = mmio_read_32(base + (8 * cpu));
-		ref_clk_ctr = mmio_read_32(base + (8 * cpu) + REF_CLK_OFFSET);
-		/* return the counter values as two different parameters */
-		write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1,
-			(uint64_t)core_clk_ctr);
write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X2,
(uint64_t)ref_clk_ctr);
return 0;
default: default:
ret = -ENOTSUP;
break; break;
} }
return -ENOTSUP; return ret;
} }
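For reference, a minimal standalone sketch of the SMC32-to-SMC64 normalisation performed at the top of plat_sip_handler(); the FUNCID_CC_SHIFT/FUNCID_CC_MASK/SMC_32/SMC_64 values are assumed to match the generic TF-A SMC calling-convention headers, and normalize_fid() is an illustrative helper, not part of the platform port:
#include <stdint.h>
/* Assumed values, matching the generic SMC calling convention headers. */
#define FUNCID_CC_SHIFT		30U
#define FUNCID_CC_MASK		0x1U
#define SMC_32			0U
#define SMC_64			1U
/* Clear the upper argument bits for SMC32 callers and promote the FID to
 * its SMC64 encoding, so that a single switch statement covers both
 * calling conventions.
 */
static uint32_t normalize_fid(uint32_t fid, uint64_t *x1, uint64_t *x2,
			      uint64_t *x3)
{
	if (((fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		*x1 = (uint32_t)*x1;
		*x2 = (uint32_t)*x2;
		*x3 = (uint32_t)*x3;
	}
	return fid | (SMC_64 << FUNCID_CC_SHIFT);
}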
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
#include <smmu.h> #include <smmu.h>
#include <tegra_def.h> #include <tegra_def.h>
#define MAX_NUM_SMMU_DEVICES U(1)
/******************************************************************************* /*******************************************************************************
* Array to hold SMMU context for Tegra186 * Array to hold SMMU context for Tegra186
******************************************************************************/ ******************************************************************************/
...@@ -305,7 +307,15 @@ static __attribute__((aligned(16))) smmu_regs_t tegra186_smmu_context[] = { ...@@ -305,7 +307,15 @@ static __attribute__((aligned(16))) smmu_regs_t tegra186_smmu_context[] = {
smmu_regs_t *plat_get_smmu_ctx(void) smmu_regs_t *plat_get_smmu_ctx(void)
{ {
/* index of _END_OF_TABLE_ */ /* index of _END_OF_TABLE_ */
tegra186_smmu_context[0].val = ARRAY_SIZE(tegra186_smmu_context) - 1; tegra186_smmu_context[0].val = (uint32_t)(ARRAY_SIZE(tegra186_smmu_context)) - 1U;
return tegra186_smmu_context; return tegra186_smmu_context;
} }
/*******************************************************************************
* Handler to return the number of supported SMMU devices
******************************************************************************/
uint32_t plat_get_num_smmu_devices(void)
{
return MAX_NUM_SMMU_DEVICES;
}
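With the NUM_SMMU_DEVICES build define dropped from the makefile further down, callers are expected to size their loops from this query at run time. A hedged consumer sketch; smmu_configure_device() is a hypothetical stand-in for whatever per-instance programming the common driver performs:
#include <stdint.h>
uint32_t plat_get_num_smmu_devices(void);	/* provided by plat_smmu.c */
/* Hypothetical per-instance programming step. */
static void smmu_configure_device(uint32_t idx)
{
	(void)idx;	/* program SMMU registers for instance 'idx' here */
}
/* Iterate over every SMMU instance the platform reports instead of relying
 * on a compile-time NUM_SMMU_DEVICES constant.
 */
static void smmu_init_all(void)
{
	uint32_t num = plat_get_num_smmu_devices();
	for (uint32_t i = 0U; i < num; i++) {
		smmu_configure_device(i);
	}
}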
...@@ -20,9 +20,6 @@ $(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS)) ...@@ -20,9 +20,6 @@ $(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS))
ENABLE_SMMU_DEVICE := 1 ENABLE_SMMU_DEVICE := 1
$(eval $(call add_define,ENABLE_SMMU_DEVICE)) $(eval $(call add_define,ENABLE_SMMU_DEVICE))
NUM_SMMU_DEVICES := 1
$(eval $(call add_define,NUM_SMMU_DEVICES))
RESET_TO_BL31 := 1 RESET_TO_BL31 := 1
PROGRAMMABLE_RESET_ADDRESS := 1 PROGRAMMABLE_RESET_ADDRESS := 1
...@@ -50,6 +47,7 @@ PLAT_INCLUDES += -I${SOC_DIR}/drivers/include ...@@ -50,6 +47,7 @@ PLAT_INCLUDES += -I${SOC_DIR}/drivers/include
BL31_SOURCES += lib/cpus/aarch64/denver.S \ BL31_SOURCES += lib/cpus/aarch64/denver.S \
lib/cpus/aarch64/cortex_a57.S \ lib/cpus/aarch64/cortex_a57.S \
${COMMON_DIR}/drivers/gpcdma/gpcdma.c \
${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \ ${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \
${COMMON_DIR}/drivers/smmu/smmu.c \ ${COMMON_DIR}/drivers/smmu/smmu.c \
${SOC_DIR}/drivers/mce/mce.c \ ${SOC_DIR}/drivers/mce/mce.c \
...@@ -64,3 +62,13 @@ BL31_SOURCES += lib/cpus/aarch64/denver.S \ ...@@ -64,3 +62,13 @@ BL31_SOURCES += lib/cpus/aarch64/denver.S \
${SOC_DIR}/plat_smmu.c \ ${SOC_DIR}/plat_smmu.c \
${SOC_DIR}/plat_trampoline.S ${SOC_DIR}/plat_trampoline.S
# Enable workarounds for selected Cortex-A57 errata.
A57_DISABLE_NON_TEMPORAL_HINT := 1
ERRATA_A57_806969 := 1
ERRATA_A57_813419 := 1
ERRATA_A57_813420 := 1
ERRATA_A57_826974 := 1
ERRATA_A57_826977 := 1
ERRATA_A57_828024 := 1
ERRATA_A57_829520 := 1
ERRATA_A57_833471 := 1
...@@ -16,14 +16,16 @@ ...@@ -16,14 +16,16 @@
*/ */
/* Secure scratch registers */ /* Secure scratch registers */
#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U #define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U #define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
#define PMC_SECURE_SCRATCH6_OFFSET 0x224U #define PMC_SECURE_SCRATCH6_OFFSET 0x224U
#define PMC_SECURE_SCRATCH7_OFFSET 0x228U #define PMC_SECURE_SCRATCH7_OFFSET 0x228U
#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U #define PMC_SECURE_SCRATCH116_OFFSET 0xB28U
#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU #define PMC_SECURE_SCRATCH117_OFFSET 0xB2CU
#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U #define PMC_SECURE_SCRATCH120_OFFSET 0xB38U
#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U #define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU
#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U
#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U
/* /*
* AHB arbitration memory write queue * AHB arbitration memory write queue
...@@ -32,6 +34,12 @@ ...@@ -32,6 +34,12 @@
#define ARAHB_MST_ID_SE2_MASK (0x1U << 13) #define ARAHB_MST_ID_SE2_MASK (0x1U << 13)
#define ARAHB_MST_ID_SE_MASK (0x1U << 14) #define ARAHB_MST_ID_SE_MASK (0x1U << 14)
/**
* SE registers
*/
#define TEGRA_SE_AES_KEYSLOT_COUNT 16
#define SE_MAX_LAST_BLOCK_SIZE 0xFFFFF
/* SE Status register */ /* SE Status register */
#define SE_STATUS_OFFSET 0x800U #define SE_STATUS_OFFSET 0x800U
#define SE_STATUS_SHIFT 0 #define SE_STATUS_SHIFT 0
...@@ -42,8 +50,24 @@ ...@@ -42,8 +50,24 @@
#define SE_STATUS(x) \ #define SE_STATUS(x) \
((x) & ((0x3U) << SE_STATUS_SHIFT)) ((x) & ((0x3U) << SE_STATUS_SHIFT))
#define SE_MEM_INTERFACE_SHIFT 2
#define SE_MEM_INTERFACE_IDLE 0
#define SE_MEM_INTERFACE_BUSY 1
#define SE_MEM_INTERFACE(x) ((x) << SE_MEM_INTERFACE_SHIFT)
/* SE register definitions */
#define SE_SECURITY_REG_OFFSET 0x0
#define SE_SECURITY_TZ_LOCK_SOFT_SHIFT 5
#define SE_SECURE 0x0
#define SE_SECURITY_TZ_LOCK_SOFT(x) ((x) << SE_SECURITY_TZ_LOCK_SOFT_SHIFT)
#define SE_SEC_ENG_DIS_SHIFT 1
#define SE_DISABLE_FALSE 0
#define SE_DISABLE_TRUE 1
#define SE_SEC_ENG_DISABLE(x)((x) << SE_SEC_ENG_DIS_SHIFT)
/* SE config register */ /* SE config register */
#define SE_CONFIG_REG_OFFSET 0x14U #define SE_CONFIG_REG_OFFSET 0x14U
#define SE_CONFIG_ENC_ALG_SHIFT 12 #define SE_CONFIG_ENC_ALG_SHIFT 12
#define SE_CONFIG_ENC_ALG_AES_ENC \ #define SE_CONFIG_ENC_ALG_AES_ENC \
((1U) << SE_CONFIG_ENC_ALG_SHIFT) ((1U) << SE_CONFIG_ENC_ALG_SHIFT)
...@@ -66,7 +90,7 @@ ...@@ -66,7 +90,7 @@
#define SE_CONFIG_DEC_ALG(x) \ #define SE_CONFIG_DEC_ALG(x) \
((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT)) ((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT))
#define SE_CONFIG_DST_SHIFT 2 #define SE_CONFIG_DST_SHIFT 2
#define SE_CONFIG_DST_MEMORY \ #define SE_CONFIG_DST_MEMORY \
((0U) << SE_CONFIG_DST_SHIFT) ((0U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_HASHREG \ #define SE_CONFIG_DST_HASHREG \
...@@ -80,33 +104,75 @@ ...@@ -80,33 +104,75 @@
#define SE_CONFIG_DST(x) \ #define SE_CONFIG_DST(x) \
((x) & ((0x7U) << SE_CONFIG_DST_SHIFT)) ((x) & ((0x7U) << SE_CONFIG_DST_SHIFT))
#define SE_CONFIG_ENC_MODE_SHIFT 24
#define SE_CONFIG_ENC_MODE_KEY128 \
((0UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_KEY192 \
((1UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_KEY256 \
((2UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA1 \
((0UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA224 \
((4UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA256 \
((5UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA384 \
((6UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE_SHA512 \
((7UL) << SE_CONFIG_ENC_MODE_SHIFT)
#define SE_CONFIG_ENC_MODE(x)\
((x) & ((0xFFUL) << SE_CONFIG_ENC_MODE_SHIFT))
#define SE_CONFIG_DEC_MODE_SHIFT 16
#define SE_CONFIG_DEC_MODE_KEY128 \
((0UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_KEY192 \
((1UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_KEY256 \
((2UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA1 \
((0UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA224 \
((4UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA256 \
((5UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA384 \
((6UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE_SHA512 \
((7UL) << SE_CONFIG_DEC_MODE_SHIFT)
#define SE_CONFIG_DEC_MODE(x)\
((x) & ((0xFFUL) << SE_CONFIG_DEC_MODE_SHIFT))
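The new ENC_MODE/DEC_MODE fields combine with the existing ALG and DST fields into a single SE_CONFIG word. A minimal composition sketch, assuming SE_CONFIG_ENC_ALG_AES_ENC, SE_CONFIG_DEC_ALG_NOP and SE_CONFIG_DST_MEMORY from elsewhere in this header and the tegra_se_write_32() accessor defined below; the helper name is illustrative only:
/* Illustrative only: program the engine for AES-256 encryption with the
 * result written to memory, mirroring how the driver composes SE_CONFIG.
 */
static inline void se_config_aes256_to_memory(const tegra_se_dev_t *se_dev)
{
	uint32_t val = (uint32_t)(SE_CONFIG_ENC_ALG_AES_ENC |	/* AES encrypt */
				  SE_CONFIG_DEC_ALG_NOP |	/* no decrypt algorithm */
				  SE_CONFIG_ENC_MODE_KEY256 |	/* 256-bit key (new field) */
				  SE_CONFIG_DST_MEMORY);	/* output via linked list */
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
}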
/* DRBG random number generator config */ /* DRBG random number generator config */
#define SE_RNG_CONFIG_REG_OFFSET 0x340 #define SE_RNG_CONFIG_REG_OFFSET 0x340
#define DRBG_MODE_SHIFT 0 #define DRBG_MODE_SHIFT 0
#define DRBG_MODE_NORMAL \ #define DRBG_MODE_NORMAL \
((0UL) << DRBG_MODE_SHIFT) ((0U) << DRBG_MODE_SHIFT)
#define DRBG_MODE_FORCE_INSTANTION \ #define DRBG_MODE_FORCE_INSTANTION \
((1UL) << DRBG_MODE_SHIFT) ((1U) << DRBG_MODE_SHIFT)
#define DRBG_MODE_FORCE_RESEED \ #define DRBG_MODE_FORCE_RESEED \
((2UL) << DRBG_MODE_SHIFT) ((2U) << DRBG_MODE_SHIFT)
#define SE_RNG_CONFIG_MODE(x) \ #define SE_RNG_CONFIG_MODE(x) \
((x) & ((0x3UL) << DRBG_MODE_SHIFT)) ((x) & ((0x3U) << DRBG_MODE_SHIFT))
#define DRBG_SRC_SHIFT 2 #define DRBG_SRC_SHIFT 2
#define DRBG_SRC_NONE \ #define DRBG_SRC_NONE \
((0UL) << DRBG_SRC_SHIFT) ((0U) << DRBG_SRC_SHIFT)
#define DRBG_SRC_ENTROPY \ #define DRBG_SRC_ENTROPY \
((1UL) << DRBG_SRC_SHIFT) ((1U) << DRBG_SRC_SHIFT)
#define DRBG_SRC_LFSR \ #define DRBG_SRC_LFSR \
((2UL) << DRBG_SRC_SHIFT) ((2U) << DRBG_SRC_SHIFT)
#define SE_RNG_SRC_CONFIG_MODE(x) \ #define SE_RNG_SRC_CONFIG_MODE(x) \
((x) & ((0x3UL) << DRBG_SRC_SHIFT)) ((x) & ((0x3U) << DRBG_SRC_SHIFT))
/* DRBG random number generator entropy config */ /* DRBG random number generator entropy config */
#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U #define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U
#define DRBG_RO_ENT_SRC_SHIFT 1 #define DRBG_RO_ENT_SRC_SHIFT 1
#define DRBG_RO_ENT_SRC_ENABLE \ #define DRBG_RO_ENT_SRC_ENABLE \
((1U) << DRBG_RO_ENT_SRC_SHIFT) ((1U) << DRBG_RO_ENT_SRC_SHIFT)
#define DRBG_RO_ENT_SRC_DISABLE \ #define DRBG_RO_ENT_SRC_DISABLE \
...@@ -114,7 +180,7 @@ ...@@ -114,7 +180,7 @@
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \ #define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \
((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT)) ((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT))
#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0 #define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
#define DRBG_RO_ENT_SRC_LOCK_ENABLE \ #define DRBG_RO_ENT_SRC_LOCK_ENABLE \
((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT) ((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
#define DRBG_RO_ENT_SRC_LOCK_DISABLE \ #define DRBG_RO_ENT_SRC_LOCK_DISABLE \
...@@ -130,9 +196,97 @@ ...@@ -130,9 +196,97 @@
#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \ #define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \
((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)) ((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT))
#define SE_RNG_RESEED_INTERVAL_REG_OFFSET 0x348
/* SE CRYPTO */
#define SE_CRYPTO_REG_OFFSET 0x304
#define SE_CRYPTO_HASH_SHIFT 0
#define SE_CRYPTO_HASH_DISABLE \
((0U) << SE_CRYPTO_HASH_SHIFT)
#define SE_CRYPTO_HASH_ENABLE \
((1U) << SE_CRYPTO_HASH_SHIFT)
#define SE_CRYPTO_XOR_POS_SHIFT 1
#define SE_CRYPTO_XOR_BYPASS \
((0U) << SE_CRYPTO_XOR_POS_SHIFT)
#define SE_CRYPTO_XOR_TOP \
((2U) << SE_CRYPTO_XOR_POS_SHIFT)
#define SE_CRYPTO_XOR_BOTTOM \
((3U) << SE_CRYPTO_XOR_POS_SHIFT)
#define SE_CRYPTO_INPUT_SEL_SHIFT 3
#define SE_CRYPTO_INPUT_AHB \
((0U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_INPUT_RANDOM \
((1U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_INPUT_AESOUT \
((2U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_INPUT_LNR_CTR \
((3U) << SE_CRYPTO_INPUT_SEL_SHIFT)
#define SE_CRYPTO_VCTRAM_SEL_SHIFT 5
#define SE_CRYPTO_VCTRAM_AHB \
((0U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
#define SE_CRYPTO_VCTRAM_AESOUT \
((2U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
#define SE_CRYPTO_VCTRAM_PREVAHB \
((3U) << SE_CRYPTO_VCTRAM_SEL_SHIFT)
#define SE_CRYPTO_IV_SEL_SHIFT 7
#define SE_CRYPTO_IV_ORIGINAL \
((0U) << SE_CRYPTO_IV_SEL_SHIFT)
#define SE_CRYPTO_IV_UPDATED \
((1U) << SE_CRYPTO_IV_SEL_SHIFT)
#define SE_CRYPTO_CORE_SEL_SHIFT 8
#define SE_CRYPTO_CORE_DECRYPT \
((0U) << SE_CRYPTO_CORE_SEL_SHIFT)
#define SE_CRYPTO_CORE_ENCRYPT \
((1U) << SE_CRYPTO_CORE_SEL_SHIFT)
#define SE_CRYPTO_KEY_INDEX_SHIFT 24
#define SE_CRYPTO_KEY_INDEX(x) (x << SE_CRYPTO_KEY_INDEX_SHIFT)
#define SE_CRYPTO_MEMIF_AHB \
((0U) << SE_CRYPTO_MEMIF_SHIFT)
#define SE_CRYPTO_MEMIF_MCCIF \
((1U) << SE_CRYPTO_MEMIF_SHIFT)
#define SE_CRYPTO_MEMIF_SHIFT 31
/* KEY TABLE */
#define SE_KEYTABLE_REG_OFFSET 0x31C
/* KEYIV PKT - key slot */
#define SE_KEYTABLE_SLOT_SHIFT 4
#define SE_KEYTABLE_SLOT(x) (x << SE_KEYTABLE_SLOT_SHIFT)
/* KEYIV PKT - KEYIV select */
#define SE_KEYIV_PKT_KEYIV_SEL_SHIFT 3
#define SE_CRYPTO_KEYIV_KEY \
((0U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT)
#define SE_CRYPTO_KEYIV_IVS \
((1U) << SE_KEYIV_PKT_KEYIV_SEL_SHIFT)
/* KEYIV PKT - IV select */
#define SE_KEYIV_PKT_IV_SEL_SHIFT 2
#define SE_CRYPTO_KEYIV_IVS_OIV \
((0U) << SE_KEYIV_PKT_IV_SEL_SHIFT)
#define SE_CRYPTO_KEYIV_IVS_UIV \
((1U) << SE_KEYIV_PKT_IV_SEL_SHIFT)
/* KEYIV PKT - key word */
#define SE_KEYIV_PKT_KEY_WORD_SHIFT 0
#define SE_KEYIV_PKT_KEY_WORD(x) \
((x) << SE_KEYIV_PKT_KEY_WORD_SHIFT)
/* KEYIV PKT - iv word */
#define SE_KEYIV_PKT_IV_WORD_SHIFT 0
#define SE_KEYIV_PKT_IV_WORD(x) \
((x) << SE_KEYIV_PKT_IV_WORD_SHIFT)
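The KEYIV packet fields above pack into a single word written via SE_KEYTABLE_REG_OFFSET. A hedged composition sketch; the helper is illustrative and not taken from the driver:
/* Illustrative only: build a KEYIV packet that selects word 'word' of the
 * AES key in key slot 'slot' (as opposed to an original/updated IV word).
 */
static inline uint32_t se_keyiv_pkt_key_word(uint32_t slot, uint32_t word)
{
	return SE_KEYTABLE_SLOT(slot) |		/* key slot, starting at bit 4 */
	       SE_CRYPTO_KEYIV_KEY |		/* select the key, not the IVs */
	       SE_KEYIV_PKT_KEY_WORD(word);	/* word index within the key */
}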
/* SE OPERATION */ /* SE OPERATION */
#define SE_OPERATION_REG_OFFSET 0x8U #define SE_OPERATION_REG_OFFSET 0x8U
#define SE_OPERATION_SHIFT 0 #define SE_OPERATION_SHIFT 0
#define SE_OP_ABORT \ #define SE_OP_ABORT \
((0x0U) << SE_OPERATION_SHIFT) ((0x0U) << SE_OPERATION_SHIFT)
#define SE_OP_START \ #define SE_OP_START \
...@@ -146,11 +300,85 @@ ...@@ -146,11 +300,85 @@
#define SE_OPERATION(x) \ #define SE_OPERATION(x) \
((x) & ((0x7U) << SE_OPERATION_SHIFT)) ((x) & ((0x7U) << SE_OPERATION_SHIFT))
/* SE CONTEXT */
#define SE_CTX_SAVE_CONFIG_REG_OFFSET 0x70
#define SE_CTX_SAVE_WORD_QUAD_SHIFT 0
#define SE_CTX_SAVE_WORD_QUAD(x) \
(x << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_KEYS_0_3 \
((0U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_KEYS_4_7 \
((1U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_ORIG_IV \
((2U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_WORD_QUAD_UPD_IV \
((3U) << SE_CTX_SAVE_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_KEY_INDEX_SHIFT 8
#define SE_CTX_SAVE_KEY_INDEX(x) (x << SE_CTX_SAVE_KEY_INDEX_SHIFT)
#define SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT 24
#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_0_3 \
((0U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_STICKY_WORD_QUAD_STICKY_4_7 \
((1U) << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_STICKY_WORD_QUAD(x) \
(x << SE_CTX_SAVE_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_SRC_SHIFT 29
#define SE_CTX_SAVE_SRC_STICKY_BITS \
((0U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_RSA_KEYTABLE \
((1U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_AES_KEYTABLE \
((2U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_PKA1_STICKY_BITS \
((3U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_MEM \
((4U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_SRK \
((6U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_SAVE_SRC_PKA1_KEYTABLE \
((7U) << SE_CTX_SAVE_SRC_SHIFT)
#define SE_CTX_STICKY_WORD_QUAD_SHIFT 24
#define SE_CTX_STICKY_WORD_QUAD_WORDS_0_3 \
((0U) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_STICKY_WORD_QUAD_WORDS_4_7 \
((1U) << SE_CTX_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_STICKY_WORD_QUAD(x) (x << SE_CTX_STICKY_WORD_QUAD_SHIFT)
#define SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT 16
#define SE_CTX_SAVE_RSA_KEY_INDEX(x) \
(x << SE_CTX_SAVE_RSA_KEY_INDEX_SHIFT)
#define SE_CTX_RSA_WORD_QUAD_SHIFT 12
#define SE_CTX_RSA_WORD_QUAD(x) \
(x << SE_CTX_RSA_WORD_QUAD_SHIFT)
#define SE_CTX_PKA1_WORD_QUAD_L_SHIFT 0
#define SE_CTX_PKA1_WORD_QUAD_L_SIZE \
((true ? 4:0) - \
(false ? 4:0) + 1)
#define SE_CTX_PKA1_WORD_QUAD_L(x)\
(((x) << SE_CTX_PKA1_WORD_QUAD_L_SHIFT) & 0x1f)
#define SE_CTX_PKA1_WORD_QUAD_H_SHIFT 12
#define SE_CTX_PKA1_WORD_QUAD_H(x)\
((((x) >> SE_CTX_PKA1_WORD_QUAD_L_SIZE) & 0xf) \
<< SE_CTX_PKA1_WORD_QUAD_H_SHIFT)
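The PKA1 word-quad index used later by tegra_se_pkakeytable_context_save() is a flat index (slot * 128 + word_quad) that these macros split between a low field at bits [4:0] and a high field at bits [15:12]. A worked check, assuming the definitions above (SE_CTX_PKA1_WORD_QUAD_L_SIZE evaluates to 5) and using a C11 static assert purely for illustration:
#include <assert.h>
#include <stdbool.h>
/* Flat index 130 (slot 1, word-quad 2): the low 5 bits land in the L field
 * at bits [4:0], the remaining bits land in the H field at bits [15:12].
 */
static_assert(SE_CTX_PKA1_WORD_QUAD_L((1 * 128) + 2) == 0x2,
	      "L field carries index bits [4:0]");
static_assert(SE_CTX_PKA1_WORD_QUAD_H((1 * 128) + 2) == 0x4000,
	      "H field carries index bits [8:5] at register bits [15:12]");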
#define SE_RSA_KEY_INDEX_SLOT0_EXP 0
#define SE_RSA_KEY_INDEX_SLOT0_MOD 1
#define SE_RSA_KEY_INDEX_SLOT1_EXP 2
#define SE_RSA_KEY_INDEX_SLOT1_MOD 3
/* SE_CTX_SAVE_AUTO */ /* SE_CTX_SAVE_AUTO */
#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U #define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U
/* Enable */ /* Enable */
#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0 #define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0
#define SE_CTX_SAVE_AUTO_DIS \ #define SE_CTX_SAVE_AUTO_DIS \
((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT) ((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
#define SE_CTX_SAVE_AUTO_EN \ #define SE_CTX_SAVE_AUTO_EN \
...@@ -167,20 +395,22 @@ ...@@ -167,20 +395,22 @@
#define SE_CTX_SAVE_AUTO_LOCK(x) \ #define SE_CTX_SAVE_AUTO_LOCK(x) \
((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)) ((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT))
/* Current context save number of blocks */ /* Current context save number of blocks*/
#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16 #define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16
#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU #define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU
#define SE_CTX_SAVE_GET_BLK_COUNT(x) \ #define SE_CTX_SAVE_GET_BLK_COUNT(x) \
(((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \ (((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
SE_CTX_SAVE_AUTO_CURR_CNT_MASK) SE_CTX_SAVE_AUTO_CURR_CNT_MASK)
#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133 #define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133
#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646 #define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646
/* SE TZRAM OPERATION - only for SE1 */ /* SE TZRAM OPERATION - only for SE1 */
#define SE_TZRAM_OPERATION 0x540U #define SE_TZRAM_OPERATION 0x540U
#define SE_TZRAM_OP_MODE_SHIFT 1 #define SE_TZRAM_OP_MODE_SHIFT 1
#define SE_TZRAM_OP_COMMAND_INIT 1
#define SE_TZRAM_OP_COMMAND_SHIFT 0
#define SE_TZRAM_OP_MODE_SAVE \ #define SE_TZRAM_OP_MODE_SAVE \
((0U) << SE_TZRAM_OP_MODE_SHIFT) ((0U) << SE_TZRAM_OP_MODE_SHIFT)
#define SE_TZRAM_OP_MODE_RESTORE \ #define SE_TZRAM_OP_MODE_RESTORE \
...@@ -188,7 +418,7 @@ ...@@ -188,7 +418,7 @@
#define SE_TZRAM_OP_MODE(x) \ #define SE_TZRAM_OP_MODE(x) \
((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT)) ((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT))
#define SE_TZRAM_OP_BUSY_SHIFT 2 #define SE_TZRAM_OP_BUSY_SHIFT 2
#define SE_TZRAM_OP_BUSY_OFF \ #define SE_TZRAM_OP_BUSY_OFF \
((0U) << SE_TZRAM_OP_BUSY_SHIFT) ((0U) << SE_TZRAM_OP_BUSY_SHIFT)
#define SE_TZRAM_OP_BUSY_ON \ #define SE_TZRAM_OP_BUSY_ON \
...@@ -196,7 +426,7 @@ ...@@ -196,7 +426,7 @@
#define SE_TZRAM_OP_BUSY(x) \ #define SE_TZRAM_OP_BUSY(x) \
((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT)) ((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT))
#define SE_TZRAM_OP_REQ_SHIFT 0 #define SE_TZRAM_OP_REQ_SHIFT 0
#define SE_TZRAM_OP_REQ_IDLE \ #define SE_TZRAM_OP_REQ_IDLE \
((0U) << SE_TZRAM_OP_REQ_SHIFT) ((0U) << SE_TZRAM_OP_REQ_SHIFT)
#define SE_TZRAM_OP_REQ_INIT \ #define SE_TZRAM_OP_REQ_INIT \
...@@ -206,7 +436,7 @@ ...@@ -206,7 +436,7 @@
/* SE Interrupt */ /* SE Interrupt */
#define SE_INT_STATUS_REG_OFFSET 0x10U #define SE_INT_STATUS_REG_OFFSET 0x10U
#define SE_INT_OP_DONE_SHIFT 4 #define SE_INT_OP_DONE_SHIFT 4
#define SE_INT_OP_DONE_CLEAR \ #define SE_INT_OP_DONE_CLEAR \
((0U) << SE_INT_OP_DONE_SHIFT) ((0U) << SE_INT_OP_DONE_SHIFT)
#define SE_INT_OP_DONE_ACTIVE \ #define SE_INT_OP_DONE_ACTIVE \
...@@ -214,19 +444,186 @@ ...@@ -214,19 +444,186 @@
#define SE_INT_OP_DONE(x) \ #define SE_INT_OP_DONE(x) \
((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT)) ((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT))
/* SE TZRAM SECURITY */
#define SE_TZRAM_SEC_REG_OFFSET 0x4
#define SE_TZRAM_SEC_SETTING_SHIFT 0
#define SE_TZRAM_SECURE \
((0UL) << SE_TZRAM_SEC_SETTING_SHIFT)
#define SE_TZRAM_NONSECURE \
((1UL) << SE_TZRAM_SEC_SETTING_SHIFT)
#define SE_TZRAM_SEC_SETTING(x) \
((x) & ((0x1UL) << SE_TZRAM_SEC_SETTING_SHIFT))
/* PKA1 KEY SLOTS */
#define TEGRA_SE_PKA1_KEYSLOT_COUNT 4
/* SE error status */ /* SE error status */
#define SE_ERR_STATUS_REG_OFFSET 0x804U #define SE_ERR_STATUS_REG_OFFSET 0x804U
#define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0x330
#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0
#define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \
(x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT)
#define SE_KEY_INDEX_SHIFT 8
#define SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(x) (x << SE_KEY_INDEX_SHIFT)
/* SE linked list (LL) register */ /* SE linked list (LL) register */
#define SE_IN_LL_ADDR_REG_OFFSET 0x18U #define SE_IN_LL_ADDR_REG_OFFSET 0x18U
#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U #define SE_OUT_LL_ADDR_REG_OFFSET 0x24U
#define SE_BLOCK_COUNT_REG_OFFSET 0x318U #define SE_BLOCK_COUNT_REG_OFFSET 0x318U
/* AES data sizes */ /* AES data sizes */
#define TEGRA_SE_KEY_256_SIZE 32
#define TEGRA_SE_KEY_192_SIZE 24
#define TEGRA_SE_KEY_128_SIZE 16
#define TEGRA_SE_AES_BLOCK_SIZE 16 #define TEGRA_SE_AES_BLOCK_SIZE 16
#define TEGRA_SE_AES_MIN_KEY_SIZE 16 #define TEGRA_SE_AES_MIN_KEY_SIZE 16
#define TEGRA_SE_AES_MAX_KEY_SIZE 32 #define TEGRA_SE_AES_MAX_KEY_SIZE 32
#define TEGRA_SE_AES_IV_SIZE 16 #define TEGRA_SE_AES_IV_SIZE 16
#define TEGRA_SE_RNG_IV_SIZE 16
#define TEGRA_SE_RNG_DT_SIZE 16
#define TEGRA_SE_RNG_KEY_SIZE 16
#define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \
TEGRA_SE_RNG_KEY_SIZE + \
TEGRA_SE_RNG_DT_SIZE)
#define TEGRA_SE_RSA512_DIGEST_SIZE 64
#define TEGRA_SE_RSA1024_DIGEST_SIZE 128
#define TEGRA_SE_RSA1536_DIGEST_SIZE 192
#define TEGRA_SE_RSA2048_DIGEST_SIZE 256
#define SE_KEY_TABLE_ACCESS_REG_OFFSET 0x284
#define SE_KEY_READ_DISABLE_SHIFT 0
#define SE_CTX_BUFER_SIZE 1072
#define SE_CTX_DRBG_BUFER_SIZE 2112
/* SE blobs size in bytes */
#define SE_CTX_SAVE_RSA_KEY_LENGTH 1024
#define SE_CTX_SAVE_RANDOM_DATA_SIZE 16
#define SE_CTX_SAVE_STICKY_BITS_SIZE 16
#define SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH 16
#define SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH 8192
#define SE_CTX_KNOWN_PATTERN_SIZE 16
#define SE_CTX_KNOWN_PATTERN_SIZE_WORDS (SE_CTX_KNOWN_PATTERN_SIZE/4)
/* SE RSA */
#define TEGRA_SE_RSA_KEYSLOT_COUNT 2
#define SE_RSA_KEY_SIZE_REG_OFFSET 0x404
#define SE_RSA_EXP_SIZE_REG_OFFSET 0x408
#define SE_RSA_MAX_EXP_BIT_SIZE 2048
#define SE_RSA_MAX_EXP_SIZE32 \
(SE_RSA_MAX_EXP_BIT_SIZE >> 5)
#define SE_RSA_MAX_MOD_BIT_SIZE 2048
#define SE_RSA_MAX_MOD_SIZE32 \
(SE_RSA_MAX_MOD_BIT_SIZE >> 5)
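The >> 5 converts a bit count into 32-bit words, so a 2048-bit exponent or modulus occupies 64 words (256 bytes), consistent with TEGRA_SE_RSA2048_DIGEST_SIZE above. A small consistency check using C11 static asserts, for illustration only:
#include <assert.h>
/* 2048 bits >> 5 = 64 words of 32 bits = 256 bytes, which matches the
 * TEGRA_SE_RSA2048_DIGEST_SIZE definition earlier in this header.
 */
static_assert(SE_RSA_MAX_EXP_SIZE32 == 64,
	      "a 2048-bit exponent occupies 64 words");
static_assert((SE_RSA_MAX_MOD_SIZE32 * 4) == TEGRA_SE_RSA2048_DIGEST_SIZE,
	      "a 2048-bit modulus occupies 256 bytes");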
/* SE_RSA_KEYTABLE_ADDR */
#define SE_RSA_KEYTABLE_ADDR 0x420
#define RSA_KEY_PKT_WORD_ADDR_SHIFT 0
#define RSA_KEY_PKT_EXPMOD_SEL_SHIFT \
((6U) << RSA_KEY_PKT_WORD_ADDR_SHIFT)
#define RSA_KEY_MOD \
((1U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT)
#define RSA_KEY_EXP \
((0U) << RSA_KEY_PKT_EXPMOD_SEL_SHIFT)
#define RSA_KEY_PKT_SLOT_SHIFT 7
#define RSA_KEY_SLOT_1 \
((0U) << RSA_KEY_PKT_SLOT_SHIFT)
#define RSA_KEY_SLOT_2 \
((1U) << RSA_KEY_PKT_SLOT_SHIFT)
#define RSA_KEY_PKT_INPUT_MODE_SHIFT 8
#define RSA_KEY_REG_INPUT \
((0U) << RSA_KEY_PKT_INPUT_MODE_SHIFT)
#define RSA_KEY_DMA_INPUT \
((1U) << RSA_KEY_PKT_INPUT_MODE_SHIFT)
/* SE_RSA_KEYTABLE_DATA */
#define SE_RSA_KEYTABLE_DATA 0x424
/* SE_RSA_CONFIG register */
#define SE_RSA_CONFIG 0x400
#define RSA_KEY_SLOT_SHIFT 24
#define RSA_KEY_SLOT(x) \
((x) << RSA_KEY_SLOT_SHIFT)
/*******************************************************************************
* Structure definition
******************************************************************************/
/* SE context blob */
#pragma pack(push, 1)
typedef struct tegra_aes_key_slot {
/* 0 - 7 AES key */
uint32_t key[8];
/* 8 - 11 Original IV */
uint32_t oiv[4];
/* 12 - 15 Updated IV */
uint32_t uiv[4];
} tegra_se_aes_key_slot_t;
#pragma pack(pop)
#pragma pack(push, 1)
typedef struct tegra_se_context {
/* random number */
unsigned char rand_data[SE_CTX_SAVE_RANDOM_DATA_SIZE];
/* Sticky bits */
unsigned char sticky_bits[SE_CTX_SAVE_STICKY_BITS_SIZE * 2];
/* AES key slots */
tegra_se_aes_key_slot_t key_slots[TEGRA_SE_AES_KEYSLOT_COUNT];
/* RSA key slots */
unsigned char rsa_keys[SE_CTX_SAVE_RSA_KEY_LENGTH];
} tegra_se_context_t;
#pragma pack(pop)
/* PKA context blob */
#pragma pack(push, 1)
typedef struct tegra_pka_context {
unsigned char sticky_bits[SE2_CONTEXT_SAVE_PKA1_STICKY_BITS_LENGTH];
unsigned char pka_keys[SE2_CONTEXT_SAVE_PKA1_KEYS_LENGTH];
} tegra_pka_context_t;
#pragma pack(pop)
/* SE context blob */
#pragma pack(push, 1)
typedef struct tegra_se_context_blob {
/* SE context */
tegra_se_context_t se_ctx;
/* Known Pattern */
unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE];
} tegra_se_context_blob_t;
#pragma pack(pop)
/* SE2 and PKA1 context blob */
#pragma pack(push, 1)
typedef struct tegra_se2_context_blob {
/* SE2 context */
tegra_se_context_t se_ctx;
/* PKA1 context */
tegra_pka_context_t pka_ctx;
/* Known Pattern */
unsigned char known_pattern[SE_CTX_KNOWN_PATTERN_SIZE];
} tegra_se2_context_blob_t;
#pragma pack(pop)
/* SE AES key type 128bit, 192bit, 256bit */
typedef enum {
SE_AES_KEY128,
SE_AES_KEY192,
SE_AES_KEY256,
} tegra_se_aes_key_type_t;
/* SE RSA key slot */
typedef struct tegra_se_rsa_key_slot {
/* 0 - 63 exponent key */
uint32_t exponent[SE_RSA_MAX_EXP_SIZE32];
/* 64 - 127 modulus key */
uint32_t modulus[SE_RSA_MAX_MOD_SIZE32];
} tegra_se_rsa_key_slot_t;
/******************************************************************************* /*******************************************************************************
* Inline functions definition * Inline functions definition
...@@ -242,8 +639,21 @@ static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, ...@@ -242,8 +639,21 @@ static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset,
mmio_write_32(dev->se_base + offset, val); mmio_write_32(dev->se_base + offset, val);
} }
static inline uint32_t tegra_pka_read_32(tegra_pka_dev_t *dev, uint32_t offset)
{
return mmio_read_32(dev->pka_base + offset);
}
static inline void tegra_pka_write_32(tegra_pka_dev_t *dev, uint32_t offset,
uint32_t val)
{
mmio_write_32(dev->pka_base + offset, val);
}
/******************************************************************************* /*******************************************************************************
* Prototypes * Prototypes
******************************************************************************/ ******************************************************************************/
int tegra_se_start_normal_operation(const tegra_se_dev_t *, uint32_t);
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *, uint32_t);
#endif /* SE_PRIVATE_H */ #endif /* SE_PRIVATE_H */
...@@ -20,7 +20,8 @@ ...@@ -20,7 +20,8 @@
* Constants and Macros * Constants and Macros
******************************************************************************/ ******************************************************************************/
#define TIMEOUT_100MS 100UL // Timeout in 100ms #define TIMEOUT_100MS 100U // Timeout in 100ms
#define RNG_AES_KEY_INDEX 1
/******************************************************************************* /*******************************************************************************
* Data structure and global variables * Data structure and global variables
...@@ -67,6 +68,15 @@ ...@@ -67,6 +68,15 @@
* #--------------------------------# * #--------------------------------#
*/ */
/* Known pattern data */
static const uint32_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE_WORDS] = {
/* 128 bit AES block */
0x0C0D0E0F,
0x08090A0B,
0x04050607,
0x00010203,
};
/* SE input and output linked list buffers */ /* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf; static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf; static tegra_se_io_lst_t se1_dst_ll_buf;
...@@ -78,7 +88,7 @@ static tegra_se_io_lst_t se2_dst_ll_buf; ...@@ -78,7 +88,7 @@ static tegra_se_io_lst_t se2_dst_ll_buf;
/* SE1 security engine device handle */ /* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = { static tegra_se_dev_t se_dev_1 = {
.se_num = 1, .se_num = 1,
/* setup base address for se */ /* Setup base address for se */
.se_base = TEGRA_SE1_BASE, .se_base = TEGRA_SE1_BASE,
/* Setup context size in AES blocks */ /* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1, .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
...@@ -86,12 +96,14 @@ static tegra_se_dev_t se_dev_1 = { ...@@ -86,12 +96,14 @@ static tegra_se_dev_t se_dev_1 = {
.src_ll_buf = &se1_src_ll_buf, .src_ll_buf = &se1_src_ll_buf,
/* Setup DST buffers for SE operations */ /* Setup DST buffers for SE operations */
.dst_ll_buf = &se1_dst_ll_buf, .dst_ll_buf = &se1_dst_ll_buf,
/* Setup context save destination */
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE),
}; };
/* SE2 security engine device handle */ /* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = { static tegra_se_dev_t se_dev_2 = {
.se_num = 2, .se_num = 2,
/* setup base address for se */ /* Setup base address for se */
.se_base = TEGRA_SE2_BASE, .se_base = TEGRA_SE2_BASE,
/* Setup context size in AES blocks */ /* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2, .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
...@@ -99,8 +111,12 @@ static tegra_se_dev_t se_dev_2 = { ...@@ -99,8 +111,12 @@ static tegra_se_dev_t se_dev_2 = {
.src_ll_buf = &se2_src_ll_buf, .src_ll_buf = &se2_src_ll_buf,
/* Setup DST buffers for SE operations */ /* Setup DST buffers for SE operations */
.dst_ll_buf = &se2_dst_ll_buf, .dst_ll_buf = &se2_dst_ll_buf,
/* Setup context save destination */
.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000),
}; };
static bool ecid_valid;
/******************************************************************************* /*******************************************************************************
* Functions Definition * Functions Definition
******************************************************************************/ ******************************************************************************/
...@@ -186,35 +202,15 @@ static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev) ...@@ -186,35 +202,15 @@ static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
} }
/* /*
* Verify the SE context save auto has been enabled. * Returns true if the SE engine is configured to perform SE context save in
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE * hardware.
* If the SE context save auto is not enabled, then set
* the context save auto enable and lock the setting.
* If the SE context save auto is not enabled and the
* enable setting is locked, then return an error.
*/ */
static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev) static inline bool tegra_se_atomic_save_enabled(const tegra_se_dev_t *se_dev)
{ {
uint32_t val; uint32_t val;
int32_t ret = 0;
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET); val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) { return (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN);
if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
__func__);
ret = -EACCES;
}
/* Program SE_CTX_SAVE_AUTO */
if (ret == 0) {
tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
SE_CTX_SAVE_AUTO_LOCK_EN |
SE_CTX_SAVE_AUTO_EN);
}
}
return ret;
} }
/* /*
...@@ -259,14 +255,6 @@ static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev) ...@@ -259,14 +255,6 @@ static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
/* Check that previous operation is finalized */ /* Check that previous operation is finalized */
ret = tegra_se_operation_prepare(se_dev); ret = tegra_se_operation_prepare(se_dev);
/* Ensure HW atomic context save has been enabled
* This should have been done at boot time.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
*/
if (ret == 0) {
ret = tegra_se_ctx_save_auto_enable(se_dev);
}
/* Read the context save progress counter: block_count /* Read the context save progress counter: block_count
* Ensure no previous context save has been triggered * Ensure no previous context save has been triggered
* SE_CTX_SAVE_AUTO.CURR_CNT == 0 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
...@@ -325,7 +313,8 @@ static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev) ...@@ -325,7 +313,8 @@ static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
* Security engine primitive operations, including normal operation * Security engine primitive operations, including normal operation
* and the context save operation. * and the context save operation.
*/ */
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes) static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
bool context_save)
{ {
uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE; uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
int ret = 0; int ret = 0;
...@@ -351,7 +340,10 @@ static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nby ...@@ -351,7 +340,10 @@ static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nby
tegra_se_make_data_coherent(se_dev); tegra_se_make_data_coherent(se_dev);
/* Start hardware operation */ /* Start hardware operation */
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START); if (context_save)
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
else
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
/* Wait for operation to finish */ /* Wait for operation to finish */
ret = tegra_se_operation_complete(se_dev); ret = tegra_se_operation_complete(se_dev);
...@@ -360,6 +352,22 @@ op_error: ...@@ -360,6 +352,22 @@ op_error:
return ret; return ret;
} }
/*
* Normal security engine operations other than the context save
*/
int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
return tegra_se_perform_operation(se_dev, nbytes, false);
}
/*
* Security engine context save operation
*/
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
return tegra_se_perform_operation(se_dev, nbytes, true);
}
/* /*
* Security Engine sequence to generate SRK * SE and SE2 will generate different SRK by different
* SE and SE2 will generate different SRK by different * SE and SE2 will generate different SRK by different
...@@ -381,7 +389,10 @@ static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev) ...@@ -381,7 +389,10 @@ static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
se_dev->dst_ll_buf->last_buff_num = 0; se_dev->dst_ll_buf->last_buff_num = 0;
/* Configure random number generator */ /* Configure random number generator */
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY); if (ecid_valid)
val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
else
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val); tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
/* Configure output destination = SRK */ /* Configure output destination = SRK */
...@@ -391,25 +402,562 @@ static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev) ...@@ -391,25 +402,562 @@ static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val); tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
/* Perform hardware operation */ /* Perform hardware operation */
ret = tegra_se_perform_operation(se_dev, 0); ret = tegra_se_start_normal_operation(se_dev, 0);
return ret;
}
/*
* Generate plain text random data to some memory location using
* SE/SE2's SP800-90 random number generator. The random data size
* must be some multiple of the AES block size (16 bytes).
*/
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
int ret = 0;
uint32_t val;
/* Set some arbitrary memory location to store the random data */
se_dev->dst_ll_buf->last_buff_num = 0;
if (!se_dev->ctx_save_buf) {
ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
return PSCI_E_NOT_PRESENT;
}
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
se_dev->ctx_save_buf)->rand_data)));
se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;
/* Configure the following hardware register settings:
* SE_CONFIG.DEC_ALG = NOP
* SE_CONFIG.ENC_ALG = RNG
* SE_CONFIG.ENC_MODE = KEY192
* SE_CONFIG.DST = MEMORY
*/
val = (SE_CONFIG_ENC_ALG_RNG |
SE_CONFIG_DEC_ALG_NOP |
SE_CONFIG_ENC_MODE_KEY192 |
SE_CONFIG_DST_MEMORY);
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
* XOR_POS = BYPASS
* INPUT_SEL = RANDOM (Entropy or LFSR)
* HASH_ENB = DISABLE
*/
val = (SE_CRYPTO_INPUT_RANDOM |
SE_CRYPTO_XOR_BYPASS |
SE_CRYPTO_CORE_ENCRYPT |
SE_CRYPTO_HASH_DISABLE |
SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
SE_CRYPTO_IV_ORIGINAL);
tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);
/* Configure RNG */
if (ecid_valid)
val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
else
val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
/* SE normal operation */
ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);
return ret;
}
/*
* Encrypt memory blocks with SRK as part of the security engine context.
* The data blocks include: random data and the known pattern data, where
* the random data is the first block and known pattern is the last block.
*/
static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
{
int ret = 0;
se_dev->src_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->src_ll_buf->buffer[0].addr = src_addr;
se_dev->src_ll_buf->buffer[0].data_len = data_size;
se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
se_dev->dst_ll_buf->buffer[0].data_len = data_size;
/* By setting the context source from memory and calling the context save
* operation, the SE encrypts the memory data with SRK.
*/
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);
ret = tegra_se_start_ctx_save_operation(se_dev, data_size);
return ret;
}
/*
* Context save the key table access control sticky bits and
* security status of each key-slot. The encrypted sticky-bits are
* 32 bytes (2 AES blocks) and formatted as the following structure:
* { bit in registers bit in context save
* SECURITY_0[4] 158
* SE_RSA_KEYTABLE_ACCESS_1[2:0] 157:155
* SE_RSA_KEYTABLE_ACCESS_0[2:0] 154:152
* SE_RSA_SECURITY_PERKEY_0[1:0] 151:150
* SE_CRYPTO_KEYTABLE_ACCESS_15[7:0] 149:142
* ...,
* SE_CRYPTO_KEYTABLE_ACCESS_0[7:0] 29:22
* SE_CRYPTO_SECURITY_PERKEY_0[15:0] 21:6
* SE_TZRAM_SECURITY_0[1:0] 5:4
* SE_SECURITY_0[16] 3:3
* SE_SECURITY_0[2:0] } 2:0
*/
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
int ret = PSCI_E_INTERN_FAIL;
uint32_t val = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
if (!se_dev->ctx_save_buf) {
ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
return PSCI_E_NOT_PRESENT;
}
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
se_dev->ctx_save_buf)->sticky_bits)));
se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;
/*
* The 1st AES block saves the sticky-bits context 1 - 16 bytes (0 - 3 words).
* The 2nd AES block saves the sticky-bits context 17 - 32 bytes (4 - 7 words).
*/
for (int i = 0; i < 2; i++) {
val = SE_CTX_SAVE_SRC_STICKY_BITS |
SE_CTX_SAVE_STICKY_WORD_QUAD(i);
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
SE_CTX_SAVE_STICKY_BITS_SIZE);
if (ret)
break;
se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
}
return ret;
}
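The two passes above write 2 x SE_CTX_SAVE_STICKY_BITS_SIZE bytes, which should exactly fill the sticky_bits[] member of tegra_se_context_t from se_private.h. A consistency sketch using a C11 static assert, for illustration only:
#include <assert.h>
/* Two context-save passes of SE_CTX_SAVE_STICKY_BITS_SIZE bytes each must
 * fill the sticky_bits[] member of tegra_se_context_t exactly.
 */
static_assert(sizeof(((tegra_se_context_t *)0)->sticky_bits) ==
	      (2 * SE_CTX_SAVE_STICKY_BITS_SIZE),
	      "sticky-bits blob is two AES blocks");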
static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
if (!se_dev->ctx_save_buf) {
ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
ret = -EINVAL;
goto aes_keytable_save_err;
}
/* AES key context save */
for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->key_slots[slot].key)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
for (int i = 0; i < 2; i++) {
val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
SE_CTX_SAVE_KEY_INDEX(slot) |
SE_CTX_SAVE_WORD_QUAD(i);
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
TEGRA_SE_KEY_128_SIZE);
if (ret) {
ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
"slot=%d, word_quad=%d.\n",
__func__, slot, i);
goto aes_keytable_save_err;
}
se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
}
/* OIV context save */
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->key_slots[slot].oiv)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
SE_CTX_SAVE_KEY_INDEX(slot) |
SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
if (ret) {
ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
__func__, slot);
goto aes_keytable_save_err;
}
/* UIV context save */
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->key_slots[slot].uiv)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;
val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
SE_CTX_SAVE_KEY_INDEX(slot) |
SE_CTX_SAVE_WORD_QUAD_UPD_IV;
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
if (ret) {
ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
__func__, slot);
goto aes_keytable_save_err;
}
}
aes_keytable_save_err:
return ret;
}
static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
/* First the modulus and then the exponent must be
* encrypted and saved. This is repeated for SLOT 0
* and SLOT 1. Hence the order:
* SLOT 0 exponent : RSA_KEY_INDEX : 0
* SLOT 0 modulus : RSA_KEY_INDEX : 1
* SLOT 1 exponent : RSA_KEY_INDEX : 2
* SLOT 1 modulus : RSA_KEY_INDEX : 3
*/
const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
/* RSA key slot 0 */
{SE_RSA_KEY_INDEX_SLOT0_EXP, SE_RSA_KEY_INDEX_SLOT0_MOD},
/* RSA key slot 1 */
{SE_RSA_KEY_INDEX_SLOT1_EXP, SE_RSA_KEY_INDEX_SLOT1_MOD},
};
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se_context_t *)se_dev->
ctx_save_buf)->rsa_keys)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
/* loop for modulus and exponent */
for (int index = 0; index < 2; index++) {
for (int word_quad = 0; word_quad < 16; word_quad++) {
val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
SE_CTX_SAVE_RSA_KEY_INDEX(
key_index_mod[slot][index]) |
SE_CTX_RSA_WORD_QUAD(word_quad);
tegra_se_write_32(se_dev,
SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
TEGRA_SE_KEY_128_SIZE);
if (ret) {
ERROR("%s: ERR: slot=%d.\n",
__func__, slot);
goto rsa_keytable_save_err;
}
/* Update the pointer to the next word quad */
se_dev->dst_ll_buf->buffer[0].addr +=
TEGRA_SE_KEY_128_SIZE;
}
}
}
rsa_keytable_save_err:
return ret;
}
static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
{
int ret = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se2_context_blob_t *)se_dev->
ctx_save_buf)->pka_ctx.sticky_bits)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;
/* PKA1 sticky bits are 1 AES block (16 bytes) */
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev, 0);
if (ret) {
ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
__func__);
goto pka_sticky_bits_save_err;
}
pka_sticky_bits_save_err:
return ret; return ret;
} }
static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int ret = 0;
se_dev->dst_ll_buf->last_buff_num = 0;
se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
((tegra_se2_context_blob_t *)se_dev->
ctx_save_buf)->pka_ctx.pka_keys)));
se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
/* for each slot, save word quad 0-127 */
for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
for (int word_quad = 0; word_quad < 512/4; word_quad++) {
val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
word_quad) |
SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
word_quad);
tegra_se_write_32(se_dev,
SE_CTX_SAVE_CONFIG_REG_OFFSET, val);
/* SE context save operation */
ret = tegra_se_start_ctx_save_operation(se_dev,
TEGRA_SE_KEY_128_SIZE);
if (ret) {
ERROR("%s: ERR: pka1 keytable ctx save error\n",
__func__);
goto pka_keytable_save_err;
}
/* Update the pointer to the next word quad */
se_dev->dst_ll_buf->buffer[0].addr +=
TEGRA_SE_KEY_128_SIZE;
}
}
pka_keytable_save_err:
return ret;
}
static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
{
tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
SE_CTX_SAVE_SRC_SRK);
/* SE context save operation */
return tegra_se_start_ctx_save_operation(se_dev, 0);
}
/*
* Lock both SE from non-TZ clients.
*/
static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
{
uint32_t val;
assert(se_dev);
val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
}
/*
* Use SRK to encrypt SE state and save to TZRAM carveout
*/
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
int err = 0;
assert(se_dev);
/* Lock entire SE/SE2 as TZ protected */
tegra_se_lock(se_dev);
INFO("%s: generate SRK\n", __func__);
/* Generate SRK */
err = tegra_se_generate_srk(se_dev);
if (err) {
ERROR("%s: ERR: SRK generation failed\n", __func__);
return err;
}
INFO("%s: generate random data\n", __func__);
/* Generate random data */
err = tegra_se_lp_generate_random_data(se_dev);
if (err) {
ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
return err;
}
INFO("%s: encrypt random data\n", __func__);
/* Encrypt the random data block */
err = tegra_se_lp_data_context_save(se_dev,
((uint64_t)(&(((tegra_se_context_t *)se_dev->
ctx_save_buf)->rand_data))),
((uint64_t)(&(((tegra_se_context_t *)se_dev->
ctx_save_buf)->rand_data))),
SE_CTX_SAVE_RANDOM_DATA_SIZE);
if (err) {
ERROR("%s: ERR: random pattern encryption failed\n", __func__);
return err;
}
INFO("%s: save SE sticky bits\n", __func__);
/* Save AES sticky bits context */
err = tegra_se_lp_sticky_bits_context_save(se_dev);
if (err) {
ERROR("%s: ERR: sticky bits context save failed\n", __func__);
return err;
}
INFO("%s: save AES keytables\n", __func__);
/* Save AES key table context */
err = tegra_se_aeskeytable_context_save(se_dev);
if (err) {
ERROR("%s: ERR: LP keytable save failed\n", __func__);
return err;
}
/* RSA key slot table context save */
INFO("%s: save RSA keytables\n", __func__);
err = tegra_se_lp_rsakeytable_context_save(se_dev);
if (err) {
ERROR("%s: ERR: rsa key table context save failed\n", __func__);
return err;
}
/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
* via SE2.
*/
if (se_dev->se_num == 2) {
/* Encrypt PKA1 sticky bits on SE2 only */
INFO("%s: save PKA sticky bits\n", __func__);
err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
if (err) {
ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
return err;
}
/* Encrypt PKA1 keyslots on SE2 only */
INFO("%s: save PKA keytables\n", __func__);
err = tegra_se_pkakeytable_context_save(se_dev);
if (err) {
ERROR("%s: ERR: PKA key table context save failed\n", __func__);
return err;
}
}
/* Encrypt known pattern */
if (se_dev->se_num == 1) {
err = tegra_se_lp_data_context_save(se_dev,
((uint64_t)(&se_ctx_known_pattern_data)),
((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
SE_CTX_KNOWN_PATTERN_SIZE);
} else if (se_dev->se_num == 2) {
err = tegra_se_lp_data_context_save(se_dev,
((uint64_t)(&se_ctx_known_pattern_data)),
((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
SE_CTX_KNOWN_PATTERN_SIZE);
}
if (err) {
ERROR("%s: ERR: save LP known pattern failure\n", __func__);
return err;
}
/* Write lp context buffer address into PMC scratch register */
if (se_dev->se_num == 1) {
/* SE context address */
mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH117_OFFSET,
((uint64_t)(se_dev->ctx_save_buf)));
} else if (se_dev->se_num == 2) {
/* SE2 & PKA1 context address */
mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
((uint64_t)(se_dev->ctx_save_buf)));
}
/* Saves SRK to PMC secure scratch registers for BootROM, which
* verifies and restores the security engine context on warm boot.
*/
err = tegra_se_save_SRK(se_dev);
if (err < 0) {
ERROR("%s: ERR: LP SRK save failure\n", __func__);
return err;
}
INFO("%s: SE context save done \n", __func__);
return err;
}
/* /*
* Initialize the SE engine handle * Initialize the SE engine handle
*/ */
void tegra_se_init(void) void tegra_se_init(void)
{ {
uint32_t val = 0;
INFO("%s: start SE init\n", __func__); INFO("%s: start SE init\n", __func__);
/* Generate random SRK to initialize DRBG */ /* Generate random SRK to initialize DRBG */
tegra_se_generate_srk(&se_dev_1); tegra_se_generate_srk(&se_dev_1);
tegra_se_generate_srk(&se_dev_2); tegra_se_generate_srk(&se_dev_2);
/* determine if ECID is valid */
val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
ecid_valid = (val == ECID_VALID);
INFO("%s: SE init done\n", __func__); INFO("%s: SE init done\n", __func__);
} }
static void tegra_se_enable_clocks(void)
{
uint32_t val = 0;
/* Enable entropy clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
val |= ENTROPY_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
/* De-Assert Entropy Reset */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
val &= ~ENTROPY_RESET_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);
/* Enable SE clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
val |= SE_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
/* De-Assert SE Reset */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
val &= ~SE_RESET_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
}
static void tegra_se_disable_clocks(void)
{
uint32_t val = 0;
/* Disable entropy clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
val &= ~ENTROPY_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
/* Disable SE clock */
val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
val &= ~SE_CLK_ENB_BIT;
mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
}
/* /*
* Security engine power suspend entry point. * Security engine power suspend entry point.
* This function is invoked from PSCI power domain suspend handler. * This function is invoked from PSCI power domain suspend handler.
...@@ -417,21 +965,57 @@ void tegra_se_init(void) ...@@ -417,21 +965,57 @@ void tegra_se_init(void)
int32_t tegra_se_suspend(void) int32_t tegra_se_suspend(void)
{ {
int32_t ret = 0; int32_t ret = 0;
uint32_t val = 0;
/* Atomic context save se2 and pka1 */ /* SE does not use SMMU in EL3, disable SMMU.
INFO("%s: SE2/PKA1 atomic context save\n", __func__); * This will be re-enabled by kernel on resume */
ret = tegra_se_context_save_atomic(&se_dev_2); val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
val &= ~PPCS_SMMU_ENABLE;
mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);
/* Atomic context save se */ tegra_se_enable_clocks();
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
}
if (ret == 0) { if (tegra_se_atomic_save_enabled(&se_dev_2) &&
INFO("%s: SE atomic context save done\n", __func__); tegra_se_atomic_save_enabled(&se_dev_1)) {
/* Atomic context save se2 and pka1 */
INFO("%s: SE2/PKA1 atomic context save\n", __func__);
if (ret == 0) {
ret = tegra_se_context_save_atomic(&se_dev_2);
}
/* Atomic context save se */
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE atomic context save done\n", __func__);
}
} else if (!tegra_se_atomic_save_enabled(&se_dev_2) &&
!tegra_se_atomic_save_enabled(&se_dev_1)) {
/* SW context save se2 and pka1 */
INFO("%s: SE2/PKA1 legacy(SW) context save\n", __func__);
if (ret == 0) {
ret = tegra_se_context_save_sw(&se_dev_2);
}
/* SW context save se */
if (ret == 0) {
INFO("%s: SE1 legacy(SW) context save\n", __func__);
ret = tegra_se_context_save_sw(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE SW context save done\n", __func__);
}
} else {
ERROR("%s: One SE is set for atomic CTX save, the other is not\n",
__func__);
} }
tegra_se_disable_clocks();
return ret; return ret;
} }
...@@ -445,6 +1029,7 @@ int32_t tegra_se_save_tzram(void) ...@@ -445,6 +1029,7 @@ int32_t tegra_se_save_tzram(void)
uint32_t timeout; uint32_t timeout;
INFO("%s: SE TZRAM save start\n", __func__); INFO("%s: SE TZRAM save start\n", __func__);
tegra_se_enable_clocks();
val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE); val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val); tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
...@@ -465,6 +1050,8 @@ int32_t tegra_se_save_tzram(void) ...@@ -465,6 +1050,8 @@ int32_t tegra_se_save_tzram(void)
INFO("%s: SE TZRAM save done!\n", __func__); INFO("%s: SE TZRAM save done!\n", __func__);
} }
tegra_se_disable_clocks();
return ret; return ret;
} }
...@@ -483,12 +1070,6 @@ static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev) ...@@ -483,12 +1070,6 @@ static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
DRBG_RO_ENT_SRC_ENABLE; DRBG_RO_ENT_SRC_ENABLE;
tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val); tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
/* Enable and lock the SE atomic context save setting */
if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
ERROR("%s: ERR: enable SE%d context save auto failed!\n",
__func__, se_dev->se_num);
}
/* Set a random value to SRK to initialize DRBG */ /* Set a random value to SRK to initialize DRBG */
tegra_se_generate_srk(se_dev); tegra_se_generate_srk(se_dev);
} }
......