/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <bpmp_ipc.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <memctrl_v2.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU
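
/*
 * PSCI power_state layout implied by the masks above (a reader's
 * sketch, not from the upstream sources):
 *   bits [3:0]  - platform state id
 *   bits [27:4] - core wake time, in TSC ticks
 */

/*
 * Per-cpu scratch for the CPU_SUSPEND wake time. Each entry is padded
 * out to CACHE_WRITEBACK_GRANULE so that one core can clean its own
 * entry to DRAM without evicting or corrupting a neighbour's data.
 */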
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:

		if (psci_get_pstate_type(power_state) != PSTATE_TYPE_STANDBY) {
			ret = PSCI_E_INVALID_PARAMS;
			break;
		}

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

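/*
 * CPU_SUSPEND handler for the core standby (C6) state. The MCE owns
 * the actual state entry; this code only programs the default wake
 * mask and passes down the per-cpu wake time saved earlier by
 * tegra_soc_validate_power_state().
 */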
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t cpu = plat_my_core_pos();
	mce_cstate_info_t cstate_info = { 0 };

	/* Program default wake mask */
	cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

	/* Enter CPU idle */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[cpu].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

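/*
 * CPU_SUSPEND handler for states above the core level. For the SoC
 * power-down state (SC7), the boot-processor feature config and the MC
 * context are saved off to scratch/TZDRAM, the SE is suspended, and
 * the MCE is polled until it reports that SC7 entry is allowed.
 */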
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t mc_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context */
		mc_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);

		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

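/*
 * Final step before WFI on the system suspend path. BL31 executes from
 * TZRAM, which loses power in SC7, so the image is copied out to
 * TZDRAM just past the CPU reset handler; the SHA256 saved beforehand
 * is intended to let the warm-boot path verify that copy (the check
 * itself lives outside this file).
 */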
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
	uint64_t val;
	int32_t ret = PSCI_E_SUCCESS;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();

		/* initialise communication channel with BPMP */
		ret = tegra_bpmp_ipc_init();
		assert(ret == 0);

		/* Enable SE clock before SE context save */
		ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);

		/*
		 * It is very unlikely that the BL31 image would be
		 * bigger than 2^32 bytes
		 */
		assert(src_len_in_bytes < UINT32_MAX);

		if (tegra_se_calculate_save_sha256(BL31_BASE,
					(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       src_len_in_bytes);

		/* Disable SE clock after SE context save */
		ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
		assert(ret == 0);
	}

	return ret;
}

int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

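/*
 * CPU_ON handler. The linear core number handed to the MCE is
 * (cluster * 2) + core-within-cluster; the shift by one below reflects
 * the two cores per cluster on this CPU complex.
 */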
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	uint64_t actlr_elx;

	/*
	 * Reset power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not
	 * be what the non-secure software which controls the idle
	 * states requested. It will re-initialize this info when the
	 * core comes back online.
	 */
	actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
	actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
	actlr_elx |= DENVER_CPU_PMSTATE_C1;
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

#if ENABLE_STRICT_CHECKING_MODE
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
#endif

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB has support for XUSB virtualization. It will
		 * have one physical function (PF) and four Virtual functions
		 * (VF)
		 *
		 * There were below two SIDs for XUSB until T186.
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV    0x1cU
		 *
		 * We have below four new SIDs added for VF(s)
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled then we have to disable SID
		 * override and program above SIDs in below newly added SID
		 * registers in XUSB PADCTL MMIO space. These registers are
		 * TZ protected and so need to be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This change disables SID override and programs XUSB SIDs
		 * in above registers to support both virtualization and
		 * non-virtualization platforms
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0) == TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1) == TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2) == TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3) == TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
			assert(mmio_read_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0) == TEGRA_SID_XUSB_DEV);
		}
	}

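	/*
	 * CCPLEX lock step is a functional-safety mode in which cores
	 * execute redundantly (hence the DUAL_EXEC flag names). The
	 * ACTLR writes below opt each exception level into it, and only
	 * when BL2 requested it via enable_ccplex_lock_step.
	 */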
	/*
	 * Enable dual execution optimized translations for all ELx.
	 */
	if (enable_ccplex_lock_step != 0U) {
		actlr_elx = read_actlr_el3();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
		write_actlr_el3(actlr_elx);

		actlr_elx = read_actlr_el2();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
		write_actlr_el2(actlr_elx);

		actlr_elx = read_actlr_el1();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
		write_actlr_el1(actlr_elx);
	}

	return PSCI_E_SUCCESS;
}

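/*
 * CPU_OFF handler. Denver-class cores (matched by the MIDR implementer
 * check) run a background dynamic code optimizer (DCO) that must be
 * quiesced before the MCE takes the core down to C7.
 */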
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

int32_t tegra_soc_prepare_system_reset(void)
{
	/* System reboot */
	mce_system_reboot();

	return PSCI_E_SUCCESS;
}