/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>

#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>

#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "psci_private.h"

21
/*
22
23
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
24
 */
25
const spd_pm_ops_t *psci_spd_pm;
26

27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in a cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];

46
unsigned int psci_plat_core_count;
47

48
/*******************************************************************************
49
50
51
52
53
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
54
 ******************************************************************************/
55
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
56
#if USE_COHERENT_MEM
57
__section("tzfw_coherent_mem")
58
59
#endif
;
60

61
62
/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
63

64
65
cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

66
67
68
/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
69
const plat_psci_ops_t *psci_plat_pm_ops;
70

71
72
73
/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
74
75
76
CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
	assert_platform_max_pwrlvl_check);
77

78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

99
100
101
102
103
104
105
106
107
108
109
110
111
/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
	if (state != 0U) {
		if (state > PLAT_MAX_RET_STATE) {
			return STATE_TYPE_OFF;
		} else {
			return STATE_TYPE_RETN;
		}
	} else {
		return STATE_TYPE_RUN;
	}
}
112
113
114
115
116

/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
117
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
118
119
120
121
122
123
124
125
		assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
126
{
127
	/* Check SBZ bits in power state are zero */
128
	if (psci_check_power_state(power_state) != 0U)
129
		return PSCI_E_INVALID_PARAMS;
130

131
	assert(psci_plat_pm_ops->validate_power_state != NULL);
132

133
134
135
136
137
138
139
140
141
142
143
144
145
146
	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
147
	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);
148

149
150
151
152
	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
153
154
}

155
156
157
158
159
160
161
162
/*******************************************************************************
 * This function verifies that the all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
163
	unsigned int cpu_idx, my_idx = plat_my_core_pos();
164

165
	for (cpu_idx = 0; cpu_idx < psci_plat_core_count;
166
			cpu_idx++) {
167
168
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
169
170
171
			continue;
		}

172
		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
173
174
175
176
177
178
			return 0;
	}

	return 1;
}

179
/*******************************************************************************
180
181
182
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
183
 ******************************************************************************/
184
static unsigned int get_power_on_target_pwrlvl(void)
185
{
186
	unsigned int pwrlvl;
187
188

	/*
189
190
191
192
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
193
	 */
194
	pwrlvl = psci_get_suspend_pwrlvl();
195
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
196
		pwrlvl = PLAT_MAX_PWR_LVL;
197
	assert(pwrlvl < PSCI_INVALID_PWR_LVL);
198
	return pwrlvl;
199
200
}

201
202
203
/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
204
 * assertion is added to prevent us from accessing the CPU power level.
205
206
207
208
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
209
{
210
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
211
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
212
			(cpu_idx < psci_plat_core_count)) {
213
214
		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
	}
215
216
}

217
218
219
/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
220
void __init psci_init_req_local_pwr_states(void)
221
{
222
	/* Initialize the requested state of all non CPU power domains as OFF */
223
	unsigned int pwrlvl;
224
	unsigned int core;
225
226

	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
227
		for (core = 0; core < psci_plat_core_count; core++) {
228
229
230
231
			psci_req_local_pwr_states[pwrlvl][core] =
				PLAT_MAX_OFF_STATE;
		}
	}
232
}
233

234
235
236
237
238
239
240
241
/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
242
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
243
							 unsigned int cpu_idx)
244
245
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
246

247
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
248
			(cpu_idx < psci_plat_core_count)) {
249
250
251
		return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
	} else
		return NULL;
252
}
253

254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
 * it's accessed by both cached and non-cached participants. To serve the common
 * minimum, perform a cache flush before read and after write so that non-cached
 * participants operate on latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve local state of non-CPU power domain node from a non-cached CPU,
 * after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
275
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update local state of non-CPU power domain node from a cached CPU; perform
 * any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
		plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
291
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
292
293
294
295
296
297
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

298
299
300
301
302
303
/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
304
305
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
306
{
307
	unsigned int parent_idx, lvl;
308
309
310
311
312
313
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
314
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
315
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
316
317
318
319
320
321
322
323
324
325
326
327
328
329
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
330
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
331
332
					const psci_power_state_t *target_state)
{
333
	unsigned int parent_idx, lvl;
334
335
336
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
337

338
	/*
339
	 * Need to flush as local_state might be accessed with Data Cache
340
	 * disabled during power on
341
	 */
342
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);
343
344
345
346

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
347
	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
348
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
349
350
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
351
352
}

353

354
/*******************************************************************************
355
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
356
 ******************************************************************************/
357
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
358
				      unsigned int end_lvl,
359
				      unsigned int *node_index)
360
361
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
362
	unsigned int i;
363
	unsigned int *node = node_index;
364

365
366
367
	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
		*node = parent_node;
		node++;
368
369
370
371
372
373
374
375
376
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}

/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
377
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
378
{
379
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
380
381
382
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
383
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
384
385
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
386
387
388
389
390
391
392
393
394
395
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
396
	psci_flush_cpu_data(psci_svc_cpu_data);
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
419
420
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
421
{
422
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
423
	unsigned int start_idx;
424
	unsigned int ncpus;
425
426
	plat_local_state_t target_state, *req_states;

427
	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
428
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
429

430
431
	/* For level 0, the requested state will be equivalent
	   to target state */
432
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
454
		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
455
456
457
458
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
459
460

	/*
461
462
463
464
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
465
	 */
466
	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
467
468
469
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
470

471
	}
472

473
474
	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
475
476
}

477
478
479
480
481
482
483
484
485
486
487
488
489
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state level X will enter is not shallower than the
 * state level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
490
{
491
492
493
494
495
496
497
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
498
	if (target_lvl == PSCI_INVALID_PWR_LVL)
499
500
		return PSCI_E_INVALID_PARAMS;

501
502
	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;
503

504
	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
524
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
525
526
527
528
529
530
531
532
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
533
534
535
	if ((is_power_down_state == 0U) &&
			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
			 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
536
537
538
539
540
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

541
542
543
544
545
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
546
{
547
	int i;
548

549
550
551
	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
			return (unsigned int) i;
552
553
	}

554
	return PSCI_INVALID_PWR_LVL;
555
556
557
558
559
560
561
562
563
564
}

/******************************************************************************
 * This functions finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

565
566
567
	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
			return (unsigned int) i;
568
	}
569

570
	return PSCI_INVALID_PWR_LVL;
571
572
}

573
/*******************************************************************************
574
575
576
577
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It picks up locks
 * from the node index list in order of increasing power domain level in the
 * range specified.
578
 ******************************************************************************/
579
580
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
581
{
582
	unsigned int parent_idx;
583
	unsigned int level;
584

585
	/* No locking required for level 0. Hence start locking from level 1 */
586
	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
587
		parent_idx = parent_nodes[level - 1U];
588
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
589
590
591
592
	}
}

/*******************************************************************************
593
594
595
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It releases the
 * locks in order of decreasing power domain level in the range specified.
596
 ******************************************************************************/
597
598
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
599
{
600
	unsigned int parent_idx;
601
	unsigned int level;
602

603
	/* Unlock top down. No unlocking required for level 0. */
604
605
	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1U; level--) {
		parent_idx = parent_nodes[level - 1U];
606
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
607
608
609
	}
}

610
/*******************************************************************************
611
 * Simple routine to determine whether a mpidr is valid or not.
612
 ******************************************************************************/
613
int psci_validate_mpidr(u_register_t mpidr)
614
{
615
	if (plat_core_pos_by_mpidr(mpidr) < 0)
616
		return PSCI_E_INVALID_PARAMS;
617
618

	return PSCI_E_SUCCESS;
619
620
621
}

/*******************************************************************************
622
 * This function determines the full entrypoint information for the requested
623
 * PSCI entrypoint on power on/resume and returns it.
624
 ******************************************************************************/
625
#ifdef __aarch64__
626
static int psci_get_ns_ep_info(entry_point_info_t *ep,
627
628
			       uintptr_t entrypoint,
			       u_register_t context_id)
629
{
630
	u_register_t ep_attr, sctlr;
631
	unsigned int daif, ee, mode;
632
633
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();
634

635
636
	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
		read_sctlr_el2() : ns_sctlr_el1;
637
	ee = 0;
638

639
	ep_attr = NON_SECURE | EP_ST_DISABLE;
640
	if ((sctlr & SCTLR_EE_BIT) != 0U) {
641
642
643
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
644
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
645

646
	ep->pc = entrypoint;
647
	zeromem(&ep->args, sizeof(ep->args));
648
	ep->args.arg0 = context_id;
649
650
651
652
653

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
654
	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
655
656
657
658
659

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
660
		if ((entrypoint & 0x1UL) != 0UL)
661
			return PSCI_E_INVALID_ADDRESS;
662

663
		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
664

665
		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
666
667
	} else {

668
669
		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
			MODE32_hyp : MODE32_svc;
670
671
672
673
674

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
675
676
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

677
		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
678
679
	}

680
	return PSCI_E_SUCCESS;
681
}
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#endif /* __aarch64__ */
729

730
731
732
733
734
735
/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
736
737
			      uintptr_t entrypoint,
			      u_register_t context_id)
738
739
740
741
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
742
	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
743
744
745
746
747
748
749
750
751
752
753
754
755
756
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

757
758
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
759
760
761
762
763
764
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
765
 ******************************************************************************/
Soby Mathew's avatar
Soby Mathew committed
766
void psci_warmboot_entrypoint(void)
767
{
768
	unsigned int end_pwrlvl;
769
	unsigned int cpu_idx = plat_my_core_pos();
770
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
771
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
772
773

	/*
774
775
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
776
	 */
777
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
Andrew Walbran's avatar
Andrew Walbran committed
778
		ERROR("Unexpected affinity info state.\n");
779
		panic();
780
	}
781
782

	/*
783
784
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
785
	 */
786
	end_pwrlvl = get_power_on_target_pwrlvl();
787

788
789
790
	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

791
	/*
792
793
794
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is snapshot
	 * and state management can be done safely.
795
	 */
796
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
797

798
799
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

800
#if ENABLE_PSCI_STAT
801
	plat_psci_stat_accounting_stop(&state_info);
802
803
#endif

804
	/*
805
806
807
808
809
810
811
812
813
814
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handler and perform the generic, architecture
	 * and platform specific handling.
815
	 */
816
817
818
819
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);
820

821
	/*
822
823
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
824
	 */
825
	psci_set_pwr_domains_to_run(end_pwrlvl);
826

827
828
829
830
831
832
833
#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
834
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
835
836
#endif

837
	/*
838
	 * This loop releases the lock corresponding to each power level
839
840
	 * in the reverse order to which they were acquired.
	 */
841
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
842
}
843
844
845
846
847
848

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operation. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization
 ******************************************************************************/
849
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
850
{
851
	assert(pm != NULL);
852
	psci_spd_pm = pm;
Soby Mathew's avatar
Soby Mathew committed
853

854
	if (pm->svc_migrate != NULL)
Soby Mathew's avatar
Soby Mathew committed
855
856
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

857
	if (pm->svc_migrate_info != NULL)
Soby Mathew's avatar
Soby Mathew committed
858
859
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
860
}
861

Soby Mathew's avatar
Soby Mathew committed
862
863
864
865
866
867
868
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
869
int psci_spd_migrate_info(u_register_t *mpidr)
Soby Mathew's avatar
Soby Mathew committed
870
871
872
{
	int rc;

873
	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
Soby Mathew's avatar
Soby Mathew committed
874
875
876
877
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

878
879
	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
Soby Mathew's avatar
Soby Mathew committed
880
881
882
883
884

	return rc;
}


885
/*******************************************************************************
886
 * This function prints the state of all power domains present in the
887
888
 * system
 ******************************************************************************/
889
void psci_print_power_domain_map(void)
890
891
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
892
	unsigned int idx;
893
894
895
	plat_local_state_t state;
	plat_local_state_type_t state_type;

896
	/* This array maps to the PSCI_STATE_X definitions in psci.h */
Soby Mathew's avatar
Soby Mathew committed
897
	static const char * const psci_state_type_str[] = {
898
		"ON",
899
		"RETENTION",
900
901
902
		"OFF",
	};

903
	INFO("PSCI Power Domain Map:\n");
904
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
905
906
907
908
909
910
911
912
913
914
915
							idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

916
	for (idx = 0; idx < psci_plat_core_count; idx++) {
917
918
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
919
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
920
				" State %s (0x%x)\n",
921
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
922
923
924
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
925
926
927
	}
#endif
}
928

929
930
931
932
933
934
935
936
937
/******************************************************************************
 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
 * have ever been powered up would have set its MPDIR value to something other
 * than PSCI_INVALID_MPIDR. Note that MPDIR isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
938
	unsigned int idx, n_valid = 0U;
939

940
	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
941
942
943
944
		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
			n_valid++;
	}

945
	assert(n_valid > 0U);
946

947
	return (n_valid > 1U) ? 1 : 0;
948
949
}

/*******************************************************************************
 * Initiate power down sequence, by calling power down operations registered for
 * this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches enabled both before and after this call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches, then perform cache-maintenance operations in software.
	 *
	 * This also calls prepare_cpu_pwr_dwn() to initiate power down
	 * sequence, but that function will return with data caches disabled.
	 * We must ensure that the stack memory is flushed out to memory before
	 * we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}