/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef PSCI_PRIVATE_H
#define PSCI_PRIVATE_H

#include <arch.h>
#include <bakery_lock.h>
#include <bl_common.h>
#include <cpu_data.h>
#include <psci.h>
#include <spinlock.h>

#if HW_ASSISTED_COHERENCY

/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
#define psci_flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

#define psci_dsbish()

/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)

#define psci_lock_get(non_cpu_pd_node)				\
	spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])

#else

/*
 * If not all PSCI participants are cache-coherent, perform cache maintenance
 * and issue barriers wherever required to coordinate state.
 */
#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)		flush_cpu_data(member)
#define psci_inv_cpu_data(member)		inv_cpu_data(member)

#define psci_dsbish()				dsbish()

/*
 * Use bakery locks for state coordination as not all PSCI participants are
 * cache coherent.
 */
#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)

#define psci_lock_get(non_cpu_pd_node)				\
	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])

#endif

/* Initialise a non-CPU power domain node's lock index to its own node index */
#define psci_lock_init(_non_cpu_pd_node, _idx)			\
	((_non_cpu_pd_node)[(_idx)].lock_index = (_idx))
/*
 * The PSCI capability which are provided by the generic code but does not
 * depend on the platform or spd capabilities.
 */
#define PSCI_GENERIC_CAP	\
			(define_psci_cap(PSCI_VERSION) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_FEATURES))

/*
 * The PSCI capabilities mask for 64 bit functions.
 */
#define PSCI_CAP_64BIT_MASK	\
			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_MIG_AARCH64) |		\
			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
			define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
			define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
			define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))

/*
 * Helper functions to get/set the fields of PSCI per-cpu data.
 */
/* Set the affinity info state of the calling CPU in its per-cpu PSCI data. */
static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
{
	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
}

/* Return the affinity info state of the calling CPU from its per-cpu data. */
static inline aff_info_state_t psci_get_aff_info_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
}

/* Return the affinity info state of the CPU indexed by 'idx'. */
static inline aff_info_state_t psci_get_aff_info_state_by_idx(int idx)
{
	return get_cpu_data_by_index((unsigned int)idx,
				     psci_svc_cpu_data.aff_info_state);
}

/* Set the affinity info state of the CPU indexed by 'idx'. */
static inline void psci_set_aff_info_state_by_idx(int idx,
						  aff_info_state_t aff_state)
{
	set_cpu_data_by_index((unsigned int)idx,
			      psci_svc_cpu_data.aff_info_state, aff_state);
}

/* Return the per-cpu target power level recorded for a suspend request. */
static inline unsigned int psci_get_suspend_pwrlvl(void)
{
	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
}

/* Record the target power level for this CPU's suspend request. */
static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
}

/* Set the platform local power state of the calling CPU in per-cpu data. */
static inline void psci_set_cpu_local_state(plat_local_state_t state)
{
	set_cpu_data(psci_svc_cpu_data.local_state, state);
}

/* Return the platform local power state of the calling CPU. */
static inline plat_local_state_t psci_get_cpu_local_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.local_state);
}

/* Return the platform local power state of the CPU indexed by 'idx'. */
static inline plat_local_state_t psci_get_cpu_local_state_by_idx(int idx)
{
	return get_cpu_data_by_index((unsigned int)idx,
				     psci_svc_cpu_data.local_state);
}

/*
 * Helper function to identify a CPU standby request in a PSCI Suspend call:
 * standby is requested when neither a power-down state nor any retention
 * level above the CPU is asked for. Returns 1 for standby, 0 otherwise.
 */
static inline int is_cpu_standby_req(unsigned int is_power_down_state,
				     unsigned int retn_lvl)
{
	if ((is_power_down_state == 0U) && (retn_lvl == 0U))
		return 1;

	return 0;
}

/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains i.e. leaf nodes and all other power domains which are parents of a
 * CPU power domain i.e. non-leaf nodes.
 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Local power state of this power domain */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree */
	unsigned char level;

	/* For indexing the psci_lock array*/
	unsigned char lock_index;
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
191
	u_register_t mpidr;
192

193
194
195
196
197
198
199
200
201
202
203
204
205
206
	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether to whether using pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
211
212
213
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
214
extern unsigned int psci_caps;
215

216
217
/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
218

219
/*******************************************************************************
220
 * SPD's power management hooks registered with PSCI
221
 ******************************************************************************/
222
extern const spd_pm_ops_t *psci_spd_pm;
/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
228
229
230
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
231
int psci_validate_mpidr(u_register_t mpidr);
232
void psci_init_req_local_pwr_states(void);
233
234
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
235
int psci_validate_entry_point(entry_point_info_t *ep,
236
			uintptr_t entrypoint, u_register_t context_id);
237
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
238
				      unsigned int end_lvl,
239
				      unsigned int node_index[]);
240
void psci_do_state_coordination(unsigned int end_pwrlvl,
241
				psci_power_state_t *state_info);
242
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
243
				   unsigned int cpu_idx);
244
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
245
246
				   unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
247
			      unsigned int is_power_down_state);
248
249
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
250
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
251
void psci_print_power_domain_map(void);
252
unsigned int psci_is_last_on_cpu(void);
253
int psci_spd_migrate_info(u_register_t *mpidr);
254
255
256
257
258
259
260
261
void psci_do_pwrdown_sequence(unsigned int power_level);

/*
 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
 * available. Otherwise, this needs post-call stack maintenance, which is
 * handled in assembly.
 */
void prepare_cpu_pwr_dwn(unsigned int power_level);
/* Private exported functions from psci_on.c */
264
int psci_cpu_on_start(u_register_t target_cpu,
265
		      entry_point_info_t *ep);
266

267
268
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);
269

270
/* Private exported functions from psci_off.c */
271
int psci_do_cpu_off(unsigned int end_pwrlvl);
272

273
/* Private exported functions from psci_suspend.c */
274
void psci_cpu_suspend_start(entry_point_info_t *ep,
275
			unsigned int end_pwrlvl,
276
			psci_power_state_t *state_info,
277
			unsigned int is_power_down_state);
278

279
280
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);
281

282
/* Private exported functions from psci_helpers.S */
283
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
284
void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);
289
int psci_system_reset2(uint32_t reset_type, u_register_t cookie);
290

291
292
293
294
/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
295
			const psci_power_state_t *state_info);
296
297
298
299
300
u_register_t psci_stat_residency(u_register_t target_cpu,
			unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			unsigned int power_state);

301
302
303
304
/* Private exported functions from psci_mem_protect.c */
int psci_mem_protect(unsigned int enable);
int psci_mem_chk_range(uintptr_t base, u_register_t length);

#endif /* PSCI_PRIVATE_H */