/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <mce.h>
#include <smmu.h>
#include <stdbool.h>
#include <t18x_ari.h>
#include <tegra186_private.h>
#include <tegra_private.h>
29
extern void memcpy16(void *dest, const void *src, unsigned int length);
30

31
/* state id mask */
32
#define TEGRA186_STATE_ID_MASK		0xFU
33
/* constants to get power state's wake time */
34
35
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
36
/* default core wake mask for CPU_SUSPEND */
37
#define TEGRA186_CORE_WAKE_MASK		0x180cU
38
/* context size to save during system suspend */
39
#define TEGRA186_SE_CONTEXT_SIZE	3U
40

41
static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
42
43
44
static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];
45

46
int32_t tegra_soc_validate_power_state(uint32_t power_state,
47
					psci_power_state_t *req_state)
48
{
49
50
51
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;
52

53
	/* save the core wake time (in TSC ticks)*/
54
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
55
			<< TEGRA186_WAKE_TIME_SHIFT;
56

57
58
59
60
61
62
63
	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
64
65
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));
66

67
68
69
70
	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:
71
72

		/* Core powerdown request */
73
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
74
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
75
76
77
78
79

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
80
81
		ret = PSCI_E_INVALID_PARAMS;
		break;
82
83
	}

84
	return ret;
85
86
}

87
88
89
90
91
92
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	(void)cpu_state;
	return PSCI_E_SUCCESS;
}

93
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
94
95
{
	const plat_local_state_t *pwr_domain_state;
96
97
98
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
99
	mce_cstate_info_t cstate_info = { 0 };
100
	uint64_t smmu_ctx_base;
101
102
	uint32_t val;

103
104
105
106
	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
107
108
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
109

110
111
	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {
112

113
114
		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
115
			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
116
117
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);
118

119
120
121
122
123
124
125
126
127
128
129
130
	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
131
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);
132

133
134
		/* save SMMU context to TZDRAM */
		smmu_ctx_base = params_from_bl2->tzdram_base +
135
				tegra186_get_smmu_ctx_offset();
136
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
137
138

		/* Prepare for system suspend */
139
140
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
141
142
143
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
144

145
146
		/* Loop until system suspend is allowed */
		do {
147
148
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
149
					(uint64_t)TEGRA_ARI_CORE_C7,
150
					MCE_CORE_SLEEP_TIME_INFINITE,
151
152
					0U);
		} while (val == 0U);
153

154
		/* Instruct the MCE to enter system suspend state */
155
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
156
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
157
158
159
160

		/* set system suspend state for house-keeping */
		tegra186_set_system_suspend_entry();

161
162
	} else {
		; /* do nothing */
163
164
165
166
	}

	return PSCI_E_SUCCESS;
}
167

168
/*******************************************************************************
169
 * Helper function to check if this is the last ON CPU in the cluster
170
 ******************************************************************************/
171
172
static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
173
{
174
175
176
177
178
179
180
181
182
183
184
185
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);
186

187
188
189
190
191
192
193
194
195
196
197
198
199
200
	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };
201
202

	/* CPU suspend */
203
	if (target == PSTATE_ID_CORE_POWERDN) {
204
205
206
207
208
209
		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
210
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
211
212
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
213
				0U);
214
215
		if (ret == 0) {
			target = PSCI_LOCAL_STATE_RUN;
216
		}
217
218
219
	}

	/* CPU off */
220
	if (target == PLAT_MAX_OFF_STATE) {
221
		/* Enable cluster powerdn from last CPU in the cluster */
222
		if (tegra_last_cpu_in_cluster(states, ncpu)) {
223
			/* Enable CC7 state and turn off wake mask */
224
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
225
226
227
228
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
229
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
230
						  (uint64_t)TEGRA_ARI_CORE_C7,
231
						  MCE_CORE_SLEEP_TIME_INFINITE,
232
						  0U);
233
234
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
235
			}
236
237
238
239
240
241

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
242
			target = PSCI_LOCAL_STATE_RUN;
243
244
245
		}
	}

246
247
248
249
250
251
252
	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
253
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
254
255
256
257
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
258
	uint32_t cpu = plat_my_core_pos();
259

260
	/* System Suspend */
261
262
263
264
265
266
267
268
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
269
	}
270

271
272
	/* target cluster/system state */
	return target;
273
274
}

275
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
276
277
278
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
279
280
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
281
		TEGRA186_STATE_ID_MASK;
282
	uint64_t val;
283
284
285
286
287
288
289
290

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
291
			tegra186_get_cpu_reset_handler_size();
292
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
293
			 (uintptr_t)BL31_END - (uintptr_t)BL31_BASE);
294
295
296
297
298
	}

	return PSCI_E_SUCCESS;
}

299
300
301
302
303
int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

304
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
305
{
306
	int32_t ret = PSCI_E_SUCCESS;
307
308
309
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
310

311
	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
312
313

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
314
		ret = PSCI_E_NOT_PRESENT;
315

316
317
318
	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);
319

320
321
		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}
322

323
	return ret;
324
325
}

326
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
327
{
328
329
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
330
	mce_cstate_info_t cstate_info = { 0 };
331
332
333
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

334
	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
335
336
337
338
339

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
340
	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {
341
342

		val = read_l2ctlr_el1();
343
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
344
345
		write_l2ctlr_el1(val);
	}
346

347
	/*
348
349
350
351
352
	 * Reset power state info for CPUs when onlining, we set
	 * deepest power when offlining a core but that may not be
	 * requested by non-secure sw which controls idle states. It
	 * will re-init this info from non-secure software when the
	 * core come online.
353
	 */
354
355
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

356
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
357
358
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
359
	}
360

361
362
363
364
	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
365
366
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

367
368
369
370
371
372
373
374
375
		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();
376
377

		/*
378
379
380
381
		 * Reset power state info for the last core doing SC7
		 * entry and exit, we set deepest power state as CC7
		 * and SC7 for SC7 entry which may not be requested by
		 * non-secure SW which controls idle states.
382
		 */
383
384
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
385
386
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
387
388
389
390
391
	}

	return PSCI_E_SUCCESS;
}

392
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
393
{
394
395
396
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;
397

398
	/* Disable Denver's DCO operations */
399
	if (impl == DENVER_IMPL) {
400
		denver_disable_dco();
401
	}
402

403
	/* Turn off CPU */
404
405
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
406
407

	return PSCI_E_SUCCESS;
408
}
409
410
411

__dead2 void tegra_soc_prepare_system_off(void)
{
412
	/* power off the entire system */
413
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);
414
415
416
417
418
419
420

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
421
}
422

423
int32_t tegra_soc_prepare_system_reset(void)
424
{
425
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
426
427
428

	return PSCI_E_SUCCESS;
}