/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
9
10
11
12
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
13
#include <bpmp_ipc.h>
14
15
#include <common/bl_common.h>
#include <common/debug.h>
16
#include <context.h>
17
#include <drivers/delay_timer.h>
18
#include <denver.h>
19
20
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
21
#include <mce.h>
22
#include <mce_private.h>
23
#include <memctrl_v2.h>
24
#include <plat/common/platform.h>
25
#include <se.h>
26
#include <smmu.h>
Tejal Kudav's avatar
Tejal Kudav committed
27
#include <t194_nvg.h>
28
#include <tegra194_private.h>
29
30
#include <tegra_platform.h>
#include <tegra_private.h>
31

32
33
/* Reset-handler data region, defined by the linker/assembly sources */
extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

/*
 * Per-CPU scratch data. Each entry is cache-line aligned so that a
 * single CPU's entry can be cleaned/read independently — the suspend
 * path reads it with caches disabled.
 */
static struct t19x_psci_percpu_data {
	uint32_t wake_time;	/* core wake time in TSC ticks */
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
49

50
int32_t tegra_soc_validate_power_state(uint32_t power_state,
51
52
					psci_power_state_t *req_state)
{
53
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
54
			   TEGRA194_STATE_ID_MASK;
55
56
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;
57
58

	/* save the core wake time (in TSC ticks)*/
59
60
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;
61
62

	/*
63
64
65
66
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
67
68
	 * unless the cluster is entering CC6/CC7.
	 */
69
70
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));
71
72
73
74

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
75
76
77
78
79
80

		/* Core idle request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_RET_STATE;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = PSCI_LOCAL_STATE_RUN;
		break;

81
82
83
84
85
86
87
88
89
90
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
91
92
		ret = PSCI_E_INVALID_PARAMS;
		break;
93
94
	}

95
	return ret;
96
97
}

98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/*
 * Place the calling core into the C6 idle state.
 *
 * The default core wake mask is programmed first so the core can be
 * woken, then the MCE is asked to enter C6 using this core's
 * previously recorded wake time.
 */
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	uint32_t core_pos = plat_my_core_pos();
	mce_cstate_info_t cstate_info = {
		.wake_mask = TEGRA194_CORE_WAKE_MASK,
		.update_wake_mask = 1,
	};

	/* Apply the default wake mask before idling the core */
	mce_update_cstate_info(&cstate_info);

	/* Request CPU idle (C6) with this core's programmed wake time */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
				  (uint64_t)TEGRA_NVG_CORE_C6,
				  t19x_percpu_data[core_pos].wake_time,
				  0U);

	return PSCI_E_SUCCESS;
}

117
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
118
119
{
	const plat_local_state_t *pwr_domain_state;
120
	uint8_t stateid_afflvl0, stateid_afflvl2;
121
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
122
	uint64_t mc_ctx_base;
123
	uint32_t val;
124
	mce_cstate_info_t sc7_cstate_info = {
125
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
126
		.ccplex = (uint32_t)TEGRA_NVG_CG_CG7,
127
128
129
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
130
	};
131
	uint32_t cpu = plat_my_core_pos();
132
	int32_t ret = 0;
133
134
135
136

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
137
		TEGRA194_STATE_ID_MASK;
138
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
139
		TEGRA194_STATE_ID_MASK;
140

141
	if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {
142

143
144
145
146
147
		/* Enter CPU powerdown */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
					  (uint64_t)TEGRA_NVG_CORE_C7,
					  t19x_percpu_data[cpu].wake_time,
					  0U);
148
149
150
151
152

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
153
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);
154

155
156
157
158
		/* save MC context */
		mc_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_mc_ctx_offset();
		tegra_mc_save_context((uintptr_t)mc_ctx_base);
159

160
161
162
163
164
165
166
167
168
		/*
		 * Suspend SE, RNG1 and PKA1 only on silcon and fpga,
		 * since VDK does not support atomic se ctx save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

169
170
		/* Prepare for system suspend */
		mce_update_cstate_info(&sc7_cstate_info);
171

172
173
174
175
		do {
			val = (uint32_t)mce_command_handler(
					(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint32_t)TEGRA_NVG_CORE_C7,
176
177
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
178
179
180
181
182
183
184
185
186
187
188
189
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		ret = mce_command_handler(
				(uint64_t)MCE_CMD_ENTER_CSTATE,
				(uint64_t)TEGRA_NVG_CORE_C7,
				MCE_CORE_SLEEP_TIME_INFINITE,
				0U);
		assert(ret == 0);

		/* set system suspend state for house-keeping */
		tegra194_set_system_suspend_entry();
190
191
	} else {
		; /* do nothing */
192
193
194
195
196
197
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
198
 * Helper function to check if this is the last ON CPU in the cluster
199
 ******************************************************************************/
200
201
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
202
{
203
204
	plat_local_state_t target;
	bool last_on_cpu = true;
205
	uint32_t num_cpus = ncpu, pos = 0;
206

207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };
228
229

	/* CPU suspend */
230
	if (target == PSTATE_ID_CORE_POWERDN) {
231
232

		/* Program default wake mask */
233
234
235
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
236
237
238
	}

	/* CPU off */
239
	if (target == PLAT_MAX_OFF_STATE) {
240
241

		/* Enable cluster powerdn from last CPU in the cluster */
242
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {
243

244
245
			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
246
247
			cstate_info.ccplex = (uint32_t)TEGRA_NVG_CG_CG7;
			cstate_info.system_state_force = 1;
248
249
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
250

251
		} else {
252

253
			/* Turn off wake_mask */
254
255
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
256
			target = PSCI_LOCAL_STATE_RUN;
257
258
259
		}
	}

260
261
262
263
264
265
266
267
268
269
270
271
272
273
	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

274
	/* System Suspend */
275
276
277
278
279
280
281
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
282
	}
283

284
285
	/* target cluster/system state */
	return target;
286
287
}

288
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
289
290
291
292
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
293
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
294
		TEGRA194_STATE_ID_MASK;
295
	uint64_t src_len_in_bytes = (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE;
296
	uint64_t val;
297
	int32_t ret = PSCI_E_SUCCESS;
298
299

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
300
301
302
303
304
305
306
307
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();

		/* initialise communication channel with BPMP */
		ret = tegra_bpmp_ipc_init();
		assert(ret == 0);

		/* Enable SE clock before SE context save */
308
		ret = tegra_bpmp_ipc_enable_clock(TEGRA194_CLK_SE);
309
310
311
312
313
314
315
316
317
318
319
320
321
322
		assert(ret == 0);

		/*
		 * It is very unlikely that the BL31 image would be
		 * bigger than 2^32 bytes
		 */
		assert(src_len_in_bytes < UINT32_MAX);

		if (tegra_se_calculate_save_sha256(BL31_BASE,
					(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

323
324
325
326
327
328
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
329
		      tegra194_get_cpu_reset_handler_size();
330
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
331
332
333
		       src_len_in_bytes);

		/* Disable SE clock after SE context save */
334
		ret = tegra_bpmp_ipc_disable_clock(TEGRA194_CLK_SE);
335
		assert(ret == 0);
336
337
	}

338
	return ret;
339
340
}

341
342
343
344
345
/*
 * Early suspend-powerdown hook - not implemented on this platform.
 */
int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	/* explicitly unused, consistent with tegra_soc_pwr_domain_off() */
	(void)target_state;

	return PSCI_E_NOT_SUPPORTED;
}

346
/*
 * Power on the CPU identified by mpidr.
 *
 * Validates the cluster number, converts (cluster, cpu) to a linear
 * core id and asks the MCE to online it. Returns PSCI_E_SUCCESS,
 * PSCI_E_NOT_PRESENT for an invalid cluster or PSCI_E_DENIED if the
 * MCE refuses the request.
 */
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # (2 cores per cluster) */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

369
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
370
{
371
372
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t enable_ccplex_lock_step = params_from_bl2->enable_ccplex_lock_step;
373
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
374
375
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	uint64_t actlr_elx;
376
377
378
379
380
381
382
383

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */
384
385
386
387
	actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
	actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
	actlr_elx |= DENVER_CPU_PMSTATE_C1;
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
388
389
390
391
392
393

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
394

395
#if ENABLE_STRICT_CHECKING_MODE
396
397
398
399
400
		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();
401
#endif
402

403
		/* Init SMMU */
404
405
		tegra_smmu_init();

406
407
408
		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB has support for XUSB virtualization. It will
		 * have one physical function (PF) and four Virtual functions
		 * (VF)
		 *
		 * There were below two SIDs for XUSB until T186.
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV    0x1cU
		 *
		 * We have below four new SIDs added for VF(s)
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled then we have to disable SID
		 * override and program above SIDs in below newly added SID
		 * registers in XUSB PADCTL MMIO space. These registers are
		 * TZ protected and so need to be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This change disables SID override and programs XUSB SIDs
		 * in above registers to support both virtualization and
		 * non-virtualization platforms
		 */
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {

			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
			mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
				XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);
		}
457
	}
458

459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
	/*
	 * Enable dual execution optimized translations for all ELx.
	 */
	if (enable_ccplex_lock_step != 0U) {
		actlr_elx = read_actlr_el3();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL3;
		write_actlr_el3(actlr_elx);

		actlr_elx = read_actlr_el2();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL2;
		write_actlr_el2(actlr_elx);

		actlr_elx = read_actlr_el1();
		actlr_elx |= DENVER_CPU_ENABLE_DUAL_EXEC_EL1;
		write_actlr_el1(actlr_elx);
474
475
476
477
478
	}

	return PSCI_E_SUCCESS;
}

479
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
480
{
481
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
482
	int32_t ret = 0;
483

484
485
	(void)target_state;

486
	/* Disable Denver's DCO operations */
487
	if (impl == DENVER_IMPL) {
488
		denver_disable_dco();
489
	}
490
491

	/* Turn off CPU */
492
493
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
494
	assert(ret == 0);
495
496
497
498
499
500
501

	return PSCI_E_SUCCESS;
}

/*
 * Power the system off. Never returns: after requesting shutdown from
 * the MCE the core executes WFI and spins until power is removed.
 */
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */
	mce_system_shutdown();

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

512
int32_t tegra_soc_prepare_system_reset(void)
513
{
514
515
516
	/* System reboot */
	mce_system_reboot();

517
518
	return PSCI_E_SUCCESS;
}