/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <mce.h>
#include <smmu.h>
#include <t18x_ari.h>
#include <tegra_private.h>

extern void memcpy16(void *dest, const void *src, unsigned int length);

extern void prepare_cpu_pwr_dwn(void);
extern void tegra186_cpu_reset_handler(void);
extern uint32_t __tegra186_cpu_reset_handler_end,
		__tegra186_smmu_context;

/* TZDRAM offset for saving SMMU context */
#define TEGRA186_SMMU_CTX_OFFSET	16UL

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3U

static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];

static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];

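/*******************************************************************************
 * Platform handler to validate the requested entry power state and save the
 * core's requested wake time
 ******************************************************************************/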
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

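/*******************************************************************************
 * Platform handler to program a core idle/powerdown state or to prepare the
 * SoC for system suspend (SC7)
 ******************************************************************************/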
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t smmu_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);

		/* save SMMU context to TZDRAM */
		smmu_ctx_base = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_smmu_context -
			 (uintptr_t)tegra186_cpu_reset_handler);
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Loop until system suspend is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = *states;
	uint32_t pos = 0;
	plat_local_state_t result = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos(), num_cpu = ncpu;
	int32_t ret, cluster_powerdn = 1;
	uint64_t core_pos = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
	mce_cstate_info_t cstate_info = { 0 };

	/* get the power state at this level */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = states[core_pos];
	}
	if (lvl == (uint32_t)MPIDR_AFFLVL2) {
		target = states[cpu];
	}

	/* CPU suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PSTATE_ID_CORE_POWERDN)) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				TEGRA_ARI_CORE_C7, tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret != 0) {
			result = PSTATE_ID_CORE_POWERDN;
		}
	}

	/* CPU off */
	if ((lvl == (uint32_t)MPIDR_AFFLVL1) && (target == PLAT_MAX_OFF_STATE)) {

		/* find out the number of ON cpus in the cluster */
		do {
			target = states[pos];
			if (target != PLAT_MAX_OFF_STATE) {
				cluster_powerdn = 0;
			}
			--num_cpu;
			pos++;
		} while (num_cpu != 0U);

		/* Enable cluster powerdn from last CPU in the cluster */
		if (cluster_powerdn != 0) {

			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
						  TEGRA_ARI_CORE_C7,
						  MCE_CORE_SLEEP_TIME_INFINITE,
						  0U);
			if (ret != 0) {
				result = PSTATE_ID_CORE_POWERDN;
			}

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
		}
	}

	/* System Suspend */
	if (((lvl == (uint32_t)MPIDR_AFFLVL2) || (lvl == (uint32_t)MPIDR_AFFLVL1)) &&
	    (target == PSTATE_ID_SOC_POWERDN)) {
		result = PSTATE_ID_SOC_POWERDN;
	}

	/* default state */
	return result;
}

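/*******************************************************************************
 * Platform handler invoked before the final WFI; copies BL31 to TZDRAM when
 * the SoC is about to enter system suspend
 ******************************************************************************/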
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			((uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)&tegra186_cpu_reset_handler);
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
	}

	return PSCI_E_SUCCESS;
}

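/*******************************************************************************
 * Platform handler to power on a CPU through the MCE
 ******************************************************************************/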
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint32_t target_cpu = mpidr & (uint64_t)MPIDR_CPU_MASK;
	uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			(uint64_t)MPIDR_AFFINITY_BITS;
	int32_t ret = PSCI_E_SUCCESS;

	if (target_cluster > (uint64_t)MPIDR_AFFLVL1) {

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;

	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);

		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}

	return ret;
}

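/*******************************************************************************
 * Platform handler to complete a core power-on; enables L2 ECC/parity
 * protection and, after SC7 exit, restores SE watchdog registers and
 * re-initializes the SMMU
 ******************************************************************************/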
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) &&
	    (impl != (uint64_t)DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset the power state info for CPUs when onlining: we set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requested.
	 * Non-secure software re-initializes this info when the core comes
	 * online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit: we set the deepest power state (CC7 and
		 * SC7) for SC7 entry, which may not be what the non-secure
		 * SW controlling idle states requested.
		 */
		cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}

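/*******************************************************************************
 * Platform handler to power off the calling CPU
 ******************************************************************************/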
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
			MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	return PSCI_E_SUCCESS;
}

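/*******************************************************************************
 * Platform handler to power off the entire system
 ******************************************************************************/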
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* power off the entire system */
	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

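/*******************************************************************************
 * Platform handler to reboot the system
 ******************************************************************************/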
int32_t tegra_soc_prepare_system_reset(void)
{
	mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);

	return PSCI_E_SUCCESS;
}