/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <mce.h>
#include <mce_private.h>
#include <plat/common/platform.h>
#include <se.h>
#include <smmu.h>
#include <t194_nvg.h>
#include <tegra194_private.h>
#include <tegra_platform.h>
#include <tegra_private.h>

extern void tegra194_cpu_reset_handler(void);
extern uint32_t __tegra194_cpu_reset_handler_data,
		__tegra194_cpu_reset_handler_end;

/* TZDRAM offset for saving SMMU context */
#define TEGRA194_SMMU_CTX_OFFSET	16U

/* state id mask */
#define TEGRA194_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA194_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA194_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA194_CORE_WAKE_MASK		0x180cU

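/* per-CPU storage for the core wake time requested via CPU_SUSPEND */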
static struct t19x_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) t19x_percpu_data[PLATFORM_CORE_COUNT];

/*
 * tegra_fake_system_suspend is a boolean flag that selects between the fake
 * system suspend path and the normal system suspend path. It is set by the
 * SiP call handler when the kernel issues an SiP call to set the suspend
 * debug flags.
 */
bool tegra_fake_system_suspend;

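/*******************************************************************************
 * Handler to validate the entry state for CPU_SUSPEND and record the
 * requested core wake time
 ******************************************************************************/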
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) &
			   TEGRA194_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	t19x_percpu_data[cpu].wake_time = (power_state & TEGRA194_WAKE_TIME_MASK)
			<< TEGRA194_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&t19x_percpu_data[cpu],
			sizeof(t19x_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

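/*******************************************************************************
 * Handler called when a power domain is about to be suspended; issues the
 * MCE commands for core idle/powerdown and prepares for SC7 entry
 ******************************************************************************/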
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t smmu_ctx_base;
	uint32_t val;
	mce_cstate_info_t sc7_cstate_info = {
		.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6,
		.system = (uint32_t)TEGRA_NVG_SYSTEM_SC7,
		.system_state_force = 1U,
		.update_wake_mask = 1U,
	};
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = 0;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA194_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_NVG_CORE_C6 : (uint32_t)TEGRA_NVG_CORE_C7;
		ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				t19x_percpu_data[cpu].wake_time, 0U);
		assert(ret == 0);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save SMMU context */
		smmu_ctx_base = params_from_bl2->tzdram_base +
				tegra194_get_smmu_ctx_offset();
		tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

		/*
		 * Suspend SE, RNG1 and PKA1 only on silicon and FPGA,
		 * since the VDK does not support atomic SE context save
		 */
		if (tegra_platform_is_silicon() || tegra_platform_is_fpga()) {
			ret = tegra_se_suspend();
			assert(ret == 0);
		}

		if (!tegra_fake_system_suspend) {

			/* Prepare for system suspend */
			mce_update_cstate_info(&sc7_cstate_info);

			do {
				val = (uint32_t)mce_command_handler(
						(uint32_t)MCE_CMD_IS_SC7_ALLOWED,
						(uint32_t)TEGRA_NVG_CORE_C7,
						MCE_CORE_SLEEP_TIME_INFINITE,
						0U);
			} while (val == 0U);

			/* Instruct the MCE to enter system suspend state */
			ret = mce_command_handler(
					(uint64_t)MCE_CMD_ENTER_CSTATE,
					(uint64_t)TEGRA_NVG_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			assert(ret == 0);

			/* set system suspend state for house-keeping */
			tegra194_set_system_suspend_entry();
		}
	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {

		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA194_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {

		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_on_cpu_in_cluster(states, ncpu)) {

			/* Enable CC6 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_NVG_CLUSTER_CC6;
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1U;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					     const plat_local_state_t *states,
					     uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

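/*******************************************************************************
 * Handler invoked on the way into the power-down WFI; copies BL31 to TZDRAM
 * for SC7 exit and drives the fake system suspend path
 ******************************************************************************/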
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA194_STATE_ID_MASK;
	uint64_t val;
	u_register_t ns_sctlr_el1;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
		      tegra194_get_cpu_reset_handler_size();
		memcpy((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		       (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);

		/*
		 * In fake suspend mode, ensure that the loopback procedure
		 * towards system suspend exit is started, instead of calling
		 * WFI. This is done by disabling the MMUs of both EL1 and
		 * EL3 and calling tegra_secure_entrypoint().
		 */
		if (tegra_fake_system_suspend) {

			/*
			 * Disable EL1's MMU.
			 */
			ns_sctlr_el1 = read_sctlr_el1();
			ns_sctlr_el1 &= (~((u_register_t)SCTLR_M_BIT));
			write_sctlr_el1(ns_sctlr_el1);

			/*
			 * Disable MMU to power up the CPU in a "clean"
			 * state
			 */
			disable_mmu_el3();
			tegra_secure_entrypoint();
			panic();
		}
	}

	return PSCI_E_SUCCESS;
}

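/*******************************************************************************
 * Handler to power on a CPU; brings the target core online via the MCE
 ******************************************************************************/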
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int32_t ret = 0;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		return PSCI_E_NOT_PRESENT;
	}

	/* construct the target CPU # */
	target_cpu += (target_cluster << 1U);

	ret = mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	if (ret < 0) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

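/*******************************************************************************
 * Handler called when a power domain has just been powered on; restores
 * SMMU and SE state after SC7 exit
 ******************************************************************************/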
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not have
	 * been requested by the non-secure software that controls the idle
	 * states. Non-secure software re-initializes this info when the
	 * core comes back online.
	 */

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/*
		 * Enable strict checking after programming the GSC for
		 * enabling TZSRAM and TZDRAM
		 */
		mce_enable_strict_checking();

		/* Init SMMU */
		tegra_smmu_init();

		/* Resume SE, RNG1 and PKA1 */
		tegra_se_resume();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit. We set the deepest power state (CC7 and
		 * SC7) for SC7 entry, which may not have been requested by
		 * the non-secure software that controls the idle states.
		 */
	}

	return PSCI_E_SUCCESS;
}

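/*******************************************************************************
 * Handler to power off the calling CPU; instructs the MCE to enter C7
 ******************************************************************************/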
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
	int32_t ret = 0;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	ret = mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_NVG_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);
	assert(ret == 0);

	return PSCI_E_SUCCESS;
}

__dead2 void tegra_soc_prepare_system_off(void)
{
	/* System power off */

	/* SC8 */

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

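/*******************************************************************************
 * Handler called before a system reset is triggered
 ******************************************************************************/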
int32_t tegra_soc_prepare_system_reset(void)
{
	return PSCI_E_SUCCESS;
}