/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
9
10
11
12
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
13
14
#include <common/bl_common.h>
#include <common/debug.h>
15
#include <context.h>
16
#include <denver.h>
17
18
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
19
#include <mce.h>
20
#include <mce_private.h>
21
#include <plat/common/platform.h>
22
#include <se.h>
23
#include <smmu.h>
Tejal Kudav's avatar
Tejal Kudav committed
24
#include <t194_nvg.h>
25
#include <tegra194_private.h>
26
27
#include <tegra_platform.h>
#include <tegra_private.h>
28

29
30
extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;
31
32

/* TZDRAM offset for saving SMMU context */
33
#define TEGRA194_SMMU_CTX_OFFSET	16U
34
35

/* state id mask */
36
#define TEGRA194_STATE_ID_MASK		0xFU
37
/* constants to get power state's wake time */
38
39
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
40
/* default core wake mask for CPU_SUSPEND */
41
#define TEGRA194_CORE_WAKE_MASK		0x180cU
42

43
44
45
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];
46

47
48
49
50
51
52
53
54
/*
 * tegra_fake_system_suspend acts as a boolean var controlling whether
 * we are going to take fake system suspend code or normal system suspend code
 * path. This variable is set inside the sip call handlers, when the kernel
 * requests an SIP call to set the suspend debug flags.
 */
bool tegra_fake_system_suspend;

55
int32_t tegra_soc_validate_power_state(uint32_t power_state,
56
57
					psci_power_state_t *req_state)
{
58
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
59
			   TEGRA194_STATE_ID_MASK;
60
61
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;
62
63

	/* save the core wake time (in TSC ticks)*/
64
65
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;
66
67

	/*
68
69
70
71
	 * Clean t19x_percpu_data[cpu] to DRAM. This needs to be done to ensure
	 * that the correct value is read in tegra_soc_pwr_domain_suspend(),
	 * which is called with caches disabled. It is possible to read a stale
	 * value from DRAM in that function, because the L2 cache is not flushed
72
73
	 * unless the cluster is entering CC6/CC7.
	 */
74
75
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));
76
77
78
79
80
81
82
83
84
85
86
87
88
89

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
90
91
		ret = PSCI_E_INVALID_PARAMS;
		break;
92
93
	}

94
	return ret;
95
96
}

97
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
98
99
{
	const plat_local_state_t *pwr_domain_state;
100
	uint8_t stateid_afflvl0, stateid_afflvl2;
101
102
103
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
104
	mce_cstate_info_t sc7_cstate_info = {
105
106
107
108
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
109
	};
110
	uint32_t cpu = plat_my_core_pos();
111
	int32_t ret = 0;
112
113
114
115

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
116
		TEGRA194_STATE_ID_MASK;
117
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
118
		TEGRA194_STATE_ID_MASK;
119
120
121
122
123

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
124
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
125
126
			(uint32_t)TEGRA_NVG_CORE_C6 : (uint32_t)TEGRA_NVG_CORE_C7;
		ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
127
				t19x_percpu_data[cpu].wake_time, 0);
128
		assert(ret == 0);
129
130
131
132
133

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
134
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);
135
136
137

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
138
				tegra194_get_smmu_ctx_offset();
139
140
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

141
142
143
144
145
146
147
148
149
		/*
		 * Suspend SE, RNG1 and PKA1 only on silcon and fpga,
		 * since VDK does not support atomic se ctx save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

150
		if (!tegra_fake_system_suspend) {
151
152

			/* Prepare for system suspend */
153
			mce_update_cstate_info(&sc7_cstate_info);
154
155

			do {
156
157
158
				val = (uint32_t)mce_command_handler(
						(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
						(uint32_t)TEGRA_NVG_CORE_C7,
159
						MCE_CORE_SLEEP_TIME_INFINITE,
160
161
162
163
164
165
166
167
168
169
						0U);
			} while (val == 0U);

			/* Instruct the MCE to enter system suspend state */
			ret = mce_command_handler(
					(uint64_t)MCE_CMD_ENTER_CSTATE,
					(uint64_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			assert(ret == 0);
170
171
172

			/* set system suspend state for house-keeping */
			tegra194_set_system_suspend_entry();
173
		}
174
175
	} else {
		; /* do nothing */
176
177
178
179
180
181
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
182
 * Helper function to check if this is the last ON CPU in the cluster
183
 ******************************************************************************/
184
185
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
186
{
187
188
	plat_local_state_t target;
	bool last_on_cpu = true;
189
	uint32_t num_cpus = ncpu, pos = 0;
190

191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };
212
213

	/* CPU suspend */
214
	if (target == PSTATE_ID_CORE_POWERDN) {
215
216

		/* Program default wake mask */
217
218
219
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
220
221
222
	}

	/* CPU off */
223
	if (target == PLAT_MAX_OFF_STATE) {
224
225

		/* Enable cluster powerdn from last CPU in the cluster */
226
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {
227

228
229
			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
230
231
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
232

233
		} else {
234

235
			/* Turn off wake_mask */
236
237
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
238
			target = PSCI_LOCAL_STATE_RUN;
239
240
241
		}
	}

242
243
244
245
246
247
248
249
250
251
252
253
254
255
	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

256
	/* System Suspend */
257
258
259
260
261
262
263
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
264
	}
265

266
267
	/* target cluster/system state */
	return target;
268
269
}

270
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
271
272
273
274
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
275
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
276
		TEGRA194_STATE_ID_MASK;
277
	uint64_t val;
278
	u_register_t ns_sctlr_el1;
279
280
281
282
283
284
285
286

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
287
		      tegra194_get_cpu_reset_handler_size();
288
289
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313

		/*
		 * In fake suspend mode, ensure that the loopback procedure
		 * towards system suspend exit is started, instead of calling
		 * WFI. This is done by disabling both MMU's of EL1 & El3
		 * and calling tegra_secure_entrypoint().
		 */
		if (tegra_fake_system_suspend) {

			/*
			 * Disable EL1's MMU.
			 */
			ns_sctlr_el1 = read_sctlr_el1();
			ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT));
			write_sctlr_el1(ns_sctlr_el1);

			/*
			 * Disable MMU to power up the CPU in a "clean"
			 * state
			 */
			disable_mmu_el3();
			tegra_secure_entrypoint();
			panic();
		}
314
315
316
317
318
	}

	return PSCI_E_SUCCESS;
}

319
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
320
{
321
322
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
323
			MPIDR_AFFINITY_BITS;
324
	int32_t ret = 0;
325

326
	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
327
328
329
330
331
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__ , mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
332
	target_cpu += (target_cluster << 1U);
333

334
335
336
337
	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}
338
339
340
341

	return PSCI_E_SUCCESS;
}

342
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
343
{
344
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
345
346
347
348
349
350
351
352
353
354
355
356
357
358

	/*
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
359
360
361
362
363
364
365

		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();

366
		/* Init SMMU */
367
368
		tegra_smmu_init();

369
370
371
		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
		/*
		 * Program XUSB STREAMIDs
		 * ======================
		 * T19x XUSB has support for XUSB virtualization. It will
		 * have one physical function (PF) and four Virtual functions
		 * (VF)
		 *
		 * There were below two SIDs for XUSB until T186.
		 * 1) #define TEGRA_SID_XUSB_HOST    0x1bU
		 * 2) #define TEGRA_SID_XUSB_DEV    0x1cU
		 *
		 * We have below four new SIDs added for VF(s)
		 * 3) #define TEGRA_SID_XUSB_VF0    0x5dU
		 * 4) #define TEGRA_SID_XUSB_VF1    0x5eU
		 * 5) #define TEGRA_SID_XUSB_VF2    0x5fU
		 * 6) #define TEGRA_SID_XUSB_VF3    0x60U
		 *
		 * When virtualization is enabled then we have to disable SID
		 * override and program above SIDs in below newly added SID
		 * registers in XUSB PADCTL MMIO space. These registers are
		 * TZ protected and so need to be done in ATF.
		 *
		 * a) #define XUSB_PADCTL_HOST_AXI_STREAMID_PF_0 (0x136cU)
		 * b) #define XUSB_PADCTL_DEV_AXI_STREAMID_PF_0  (0x139cU)
		 * c) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_0 (0x1370U)
		 * d) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_1 (0x1374U)
		 * e) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_2 (0x1378U)
		 * f) #define XUSB_PADCTL_HOST_AXI_STREAMID_VF_3 (0x137cU)
		 *
		 * This change disables SID override and programs XUSB SIDs
		 * in above registers to support both virtualization and
		 * non-virtualization platforms
		 */
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_HOST);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_0, TEGRA_SID_XUSB_VF0);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_1, TEGRA_SID_XUSB_VF1);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_2, TEGRA_SID_XUSB_VF2);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_HOST_AXI_STREAMID_VF_3, TEGRA_SID_XUSB_VF3);
		mmio_write_32(TEGRA_XUSB_PADCTL_BASE +
			XUSB_PADCTL_DEV_AXI_STREAMID_PF_0, TEGRA_SID_XUSB_DEV);

418
419
420
421
422
423
424
425
426
427
428
		/*
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
		 */
	}

	return PSCI_E_SUCCESS;
}

429
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
430
{
431
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
432
	int32_t ret = 0;
433

434
435
	(void)target_state;

436
	/* Disable Denver's DCO operations */
437
	if (impl == DENVER_IMPL) {
438
		denver_disable_dco();
439
	}
440
441

	/* Turn off CPU */
442
443
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
444
	assert(ret == 0);
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */

	/* SC8 */

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

463
int32_t tegra_soc_prepare_system_reset(void)
464
465
466
{
	return PSCI_E_SUCCESS;
}