/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in a cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
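/*
 * Note (derived from the accessors below): the state requested by CPU
 * 'cpu_idx' for power level 'pwrlvl' (1 <= pwrlvl <= PLAT_MAX_PWR_LVL) is
 * stored at psci_req_local_pwr_states[pwrlvl - 1][cpu_idx].
 */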


/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
	assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;
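/*
 * For example, on a platform where PLAT_MAX_RET_STATE is 1 and
 * PLAT_MAX_OFF_STATE is 2 (both values are platform-defined), a
 * plat_local_state of 0 maps to STATE_TYPE_RUN, 1 to STATE_TYPE_RETN and
 * 2 to STATE_TYPE_OFF.
 */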

/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
	if (state != 0U) {
		if (state > PLAT_MAX_RET_STATE) {
			return STATE_TYPE_OFF;
		} else {
			return STATE_TYPE_RETN;
		}
	} else {
		return STATE_TYPE_RUN;
	}
}

/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
		assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check SBZ bits in power state are zero */
	if (psci_check_power_state(power_state) != 0U)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state != NULL);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	int cpu_idx, my_idx = (int) plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
			return 0;
	}

	return 1;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the wrong index.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	/*
	 * This should never happen, we have this here to avoid
	 * "array subscript is above array bounds" errors in GCC.
	 */
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
	psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
#pragma GCC diagnostic pop
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void __init psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	unsigned int pwrlvl;
	int core;

	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
		for (core = 0; core < PLATFORM_CORE_COUNT; core++) {
			psci_req_local_pwr_states[pwrlvl][core] =
				PLAT_MAX_OFF_STATE;
		}
	}
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
}

/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
 * it's accessed by both cached and non-cached participants. To serve the common
 * minimum, perform a cache flush before read and after write so that non-cached
 * participants operate on latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve local state of non-CPU power domain node from a non-cached CPU,
 * after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update local state of non-CPU power domain node from a cached CPU; perform
 * any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
		plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int *node_index)
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int i;
	unsigned int *node = node_index;

	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
		*node = parent_node;
		node++;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}

/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;

	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
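/*
 * Illustrative example (topology and states are hypothetical): on a system
 * with one cluster level above the CPUs, if this CPU requests OFF for its
 * cluster while another CPU in the same cluster has requested RUN, the
 * platform's plat_get_target_pwr_state() is expected to coordinate these
 * requests to RUN for the cluster level, and the loop below then breaks
 * early.
 */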
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	int start_idx;
	unsigned int ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* For level 0, the requested state will be equivalent
	   to target state */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}

/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state level X will enter is not shallower than the
 * state level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
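/*
 * Example (levels are illustrative): a request of { CPU level: retention,
 * cluster level: off } is rejected, because when traversing down from the
 * cluster the CPU asks for a shallower state than its parent, which the
 * deepest_state_type check below catches.
 */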
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if ((is_power_down_state == 0U) &&
			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
			 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
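/*
 * Note that every CPU acquires these locks in the same bottom-up order, so
 * concurrent callers cannot deadlock on the locks of shared ancestor power
 * domains; psci_release_pwr_domain_locks() below drops them in the reverse
 * (top-down) order.
 */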
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx)
{
	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx)
{
	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	unsigned int level;

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1U; level--) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
		read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if ((sctlr & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if ((entrypoint & 0x1UL) != 0UL)
			return PSCI_E_INVALID_ADDRESS;

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
			MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl;
	int cpu_idx = (int) plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is snapshot
	 * and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handler and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operation. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization
 ******************************************************************************/
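/*
 * For example, an SPD such as the TSPD typically registers its statically
 * defined spd_pm_ops_t through this function once its Secure Payload
 * environment has been set up (the exact call site is SPD-specific).
 */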
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm != NULL);
	psci_spd_pm = pm;

	if (pm->svc_migrate != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));

	return rc;
}


/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
							idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}

/******************************************************************************
 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
 * has ever been powered up would have set its MPIDR value to something other
 * than PSCI_INVALID_MPIDR. Note that the MPIDR value isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
	unsigned int idx, n_valid = 0U;

	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
			n_valid++;
	}

	assert(n_valid > 0U);

	return (n_valid > 1U) ? 1 : 0;
}

/*******************************************************************************
 * Initiate power down sequence, by calling power down operations registered for
 * this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches remain enabled both before and after this call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches, then perform cache-maintenance operations in software.
	 *
	 * This also calls prepare_cpu_pwr_dwn() to initiate power down
	 * sequence, but that function will return with data caches disabled.
	 * We must ensure that the stack memory is flushed out to memory before
	 * we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}