/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PSCI_PRIVATE_H__
#define __PSCI_PRIVATE_H__

#include <arch.h>
#include <bakery_lock.h>
#include <bl_common.h>
#include <cpu_data.h>
#include <psci.h>
#include <spinlock.h>

#if HW_ASSISTED_COHERENCY

/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
#define psci_flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

#define psci_dsbish()

/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)

#define psci_lock_get(non_cpu_pd_node)				\
	spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])

#else

/*
 * If not all PSCI participants are cache-coherent, perform cache maintenance
 * and issue barriers wherever required to coordinate state.
 */
#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)		flush_cpu_data(member)
#define psci_inv_cpu_data(member)		inv_cpu_data(member)

#define psci_dsbish()				dsbish()

/*
 * Use bakery locks for state coordination as not all PSCI participants are
 * cache coherent.
 */
#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)

#define psci_lock_get(non_cpu_pd_node)				\
	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])

#endif

/* Record, in the node itself, the index it uses into the psci_locks array */
#define psci_lock_init(non_cpu_pd_node, idx)			\
	((non_cpu_pd_node)[(idx)].lock_index = (idx))

/*
 * Helper macro to turn a PSCI function ID into its bit position in the
 * capability bitmask (only the low 5 bits of the ID differ per function).
 * NOTE(review): this definition was lost to extraction garbling in the copy
 * reviewed; restored to match upstream TF-A psci_private.h — please confirm
 * against the repository.
 */
#define define_psci_cap(x)		(1 << (x & 0x1f))

/*
 * The PSCI capability which are provided by the generic code but does not
 * depend on the platform or spd capabilities.
 */
#define PSCI_GENERIC_CAP	\
			(define_psci_cap(PSCI_VERSION) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_FEATURES))

/*
 * The PSCI capabilities mask for 64 bit functions.
 */
#define PSCI_CAP_64BIT_MASK	\
			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_MIG_AARCH64) |		\
			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
			define_psci_cap(PSCI_STAT_COUNT_AARCH64))
/*
 * Helper macros to get/set the fields of PSCI per-cpu data.
 */
#define psci_set_aff_info_state(aff_state) \
		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
#define psci_get_aff_info_state() \
		get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_set_aff_info_state_by_idx(idx, aff_state) \
		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
					aff_state)
#define psci_get_suspend_pwrlvl() \
		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \
		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
#define psci_set_cpu_local_state(state) \
		set_cpu_data(psci_svc_cpu_data.local_state, state)
#define psci_get_cpu_local_state() \
		get_cpu_data(psci_svc_cpu_data.local_state)
#define psci_get_cpu_local_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)

/*
 * Helper macros for the CPU level spinlocks
 */
#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)

/*
 * Helper macro to identify a CPU standby request in PSCI Suspend call:
 * standby means "not a power-down state" requested at retention level 0.
 */
#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains i.e. leaf nodes and all other power domains which are parents of a
 * CPU power domain i.e. non-leaf nodes.
 ******************************************************************************/
/* Node descriptor for a non-leaf (non-CPU) power domain in the tree. */
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Local power state of this power domain */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree */
	unsigned char level;

	/* For indexing the psci_lock array */
	unsigned char lock_index;
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
187
	u_register_t mpidr;
188

189
190
191
192
193
194
195
196
197
198
199
200
201
202
	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether to whether using pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
207
208
209
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
210
extern unsigned int psci_caps;
211

212
213
/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
214

215
/*******************************************************************************
216
 * SPD's power management hooks registered with PSCI
217
 ******************************************************************************/
218
extern const spd_pm_ops_t *psci_spd_pm;
/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
224
225
226
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
227
int psci_validate_mpidr(u_register_t mpidr);
228
void psci_init_req_local_pwr_states(void);
229
230
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
231
int psci_validate_entry_point(entry_point_info_t *ep,
232
			uintptr_t entrypoint, u_register_t context_id);
233
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
234
				      unsigned int end_lvl,
235
				      unsigned int node_index[]);
236
void psci_do_state_coordination(unsigned int end_pwrlvl,
237
				psci_power_state_t *state_info);
238
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
239
				   unsigned int cpu_idx);
240
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
241
242
243
244
245
				   unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state_req);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
246
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
247
void psci_print_power_domain_map(void);
248
unsigned int psci_is_last_on_cpu(void);
249
int psci_spd_migrate_info(u_register_t *mpidr);
250
251
252
253
254
255
256
257
void psci_do_pwrdown_sequence(unsigned int power_level);

/*
 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
 * available. Otherwise, this needs post-call stack maintenance, which is
 * handled in assembly.
 */
void prepare_cpu_pwr_dwn(unsigned int power_level);
/* Private exported functions from psci_on.c */
260
int psci_cpu_on_start(u_register_t target_cpu,
261
		      entry_point_info_t *ep);
262

263
264
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);
265

266
/* Private exported functions from psci_off.c */
267
int psci_do_cpu_off(unsigned int end_pwrlvl);
268

269
/* Private exported functions from psci_suspend.c */
270
void psci_cpu_suspend_start(entry_point_info_t *ep,
271
			unsigned int end_pwrlvl,
272
273
			psci_power_state_t *state_info,
			unsigned int is_power_down_state_req);
274

275
276
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);
277

278
/* Private exported functions from psci_helpers.S */
279
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
280
void psci_do_pwrup_cache_maintenance(void);
281

282
283
284
285
/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);

286
287
288
289
/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
290
			const psci_power_state_t *state_info);
291
292
293
294
295
u_register_t psci_stat_residency(u_register_t target_cpu,
			unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			unsigned int power_state);

296
#endif /* __PSCI_PRIVATE_H__ */