/*
 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * Per cpu non-secure contexts used to program the architectural state prior
 * return to the normal world. One context is statically reserved per core;
 * psci_init_pwr_domain_node() registers each entry with the context manager.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the psci capability variable. Populated at the end of psci_setup()
 * from PSCI_GENERIC_CAP plus one bit per optional hook the platform provides
 * in psci_plat_pm_ops.
 *****************************************************************************/
unsigned int psci_caps;

/*******************************************************************************
55
56
 * Function which initializes the 'psci_non_cpu_pd_nodes' or the
 * 'psci_cpu_pd_nodes' corresponding to the power level.
57
 ******************************************************************************/
58
59
60
static void psci_init_pwr_domain_node(unsigned int node_idx,
					unsigned int parent_idx,
					unsigned int level)
61
{
62
63
64
65
66
67
68
69
	if (level > PSCI_CPU_PWR_LVL) {
		psci_non_cpu_pd_nodes[node_idx].level = level;
		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
		psci_non_cpu_pd_nodes[node_idx].local_state =
							 PLAT_MAX_OFF_STATE;
	} else {
		psci_cpu_data_t *svc_cpu_data;
70

71
		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
72

73
74
		/* Initialize with an invalid mpidr */
		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
75

76
77
		svc_cpu_data =
			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
78

79
80
		/* Set the Affinity Info for the cores as OFF */
		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
81

82
		/* Invalidate the suspend level for the cpu */
83
		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
84

85
86
		/* Set the power state to OFF state */
		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
87

88
		flush_dcache_range((uintptr_t)svc_cpu_data,
89
90
91
92
93
94
						 sizeof(*svc_cpu_data));

		cm_set_context_by_index(node_idx,
					(void *) &psci_ns_context[node_idx],
					NON_SECURE);
	}
95
96
}

/*******************************************************************************
98
99
100
101
102
103
104
105
106
 * This functions updates cpu_start_idx and ncpus field for each of the node in
 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
 * the CPUs and check whether they match with the parent of the previous
 * CPU. The basic assumption for this work is that children of the same parent
 * are allocated adjacent indices. The platform should ensure this though proper
 * mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
 * plat_my_core_pos() APIs.
 *******************************************************************************/
static void psci_update_pwrlvl_limits(void)
107
{
108
	int j;
109
	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
110
	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
111
112
113
114
115
116
117
118
119
120

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		psci_get_parent_pwr_domain_nodes(cpu_idx,
						 PLAT_MAX_PWR_LVL,
						 temp_index);
		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				nodes_idx[j] = temp_index[j];
				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
					= cpu_idx;
121
			}
122
123
			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
		}
124
125
126
	}
}

/*******************************************************************************
128
129
130
131
 * Core routine to populate the power domain tree. The tree descriptor passed by
 * the platform is populated breadth-first and the first entry in the map
 * informs the number of root power domains. The parent nodes of the root nodes
 * will point to an invalid entry(-1).
132
 ******************************************************************************/
133
static void populate_power_domain_tree(const unsigned char *topology)
134
{
135
136
137
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;
138
139

	/*
140
141
142
143
144
145
146
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
147
	 */
148
149
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
150
		/*
151
152
153
154
155
156
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
157
		 */
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}
173

174
175
		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;
176

177
178
179
		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
180
181
	}

182
183
	/* Validate the sanity of array exported by the platform */
	assert(j == PLATFORM_CORE_COUNT);
184

185
186
#if !USE_COHERENT_MEM
	/* Flush the non CPU power domain data to memory */
187
	flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
188
189
			   sizeof(psci_non_cpu_pd_nodes));
#endif
190
191
192
}

/*******************************************************************************
193
194
195
196
197
198
199
200
201
 * This function initializes the power domain topology tree by querying the
 * platform. The power domain nodes higher than the CPU are populated in the
 * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
 * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
 * populate_power_domain_topology_tree() API. The algorithm populates the
 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
 * topology map.  On a platform that implements two clusters of 2 cpus each, and
 * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
 * like this:
202
203
 *
 * ---------------------------------------------------
204
 * | system node | cluster 0 node  | cluster 1 node  |
205
206
 * ---------------------------------------------------
 *
207
208
209
210
211
 * And populated psci_cpu_pd_nodes would look like this :
 * <-    cpus cluster0   -><-   cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
212
 ******************************************************************************/
213
int psci_setup(void)
214
{
215
	const unsigned char *topology_tree;
216

217
218
	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();
219

220
221
	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);
222

223
224
225
226
227
228
	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;
229

230
231
#if !USE_COHERENT_MEM
	/*
232
	 * The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
233
234
	 * coherent memory.
	 */
235
	flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
236
			   sizeof(psci_non_cpu_pd_nodes));
237
238
#endif

239
	flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
240
			   sizeof(psci_cpu_pd_nodes));
241

242
	psci_init_req_local_pwr_states();
243
244

	/*
245
246
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
247
	 */
248
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
249

250
251
	plat_setup_psci_ops((uintptr_t)psci_entrypoint,
					&psci_plat_pm_ops);
252
253
	assert(psci_plat_pm_ops);

Soby Mathew's avatar
Soby Mathew committed
254
255
256
	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

257
	if (psci_plat_pm_ops->pwr_domain_off)
Soby Mathew's avatar
Soby Mathew committed
258
		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
259
260
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
Soby Mathew's avatar
Soby Mathew committed
261
		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
262
263
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
Soby Mathew's avatar
Soby Mathew committed
264
		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
265
266
267
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
Soby Mathew's avatar
Soby Mathew committed
268
269
270
271
272
	if (psci_plat_pm_ops->system_off)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);

Achin Gupta's avatar
Achin Gupta committed
273
	return 0;
274
}