/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
#include <runtime_svc.h>
#include <asm_macros.S>
#include <cm_macros.S>

	.globl	psci_aff_on_finish_entry
	.globl	psci_aff_suspend_finish_entry
	.globl	__psci_cpu_off
	.globl	__psci_cpu_suspend

	.section	.text, "ax"; .align 3

	/* -----------------------------------------------------
	 * This cpu has been physically powered up. Depending
	 * upon whether it was resumed from suspend or simply
	 * turned on, call the common power on finisher with
	 * the handlers (chosen depending upon original state).
	 * For ease, the finisher is called with coherent
	 * stacks. This allows the cluster/cpu finishers to
	 * enter coherency and enable the mmu without running
	 * into issues. We switch back to normal stacks once
	 * all this is done.
	 * -----------------------------------------------------
	 */
psci_aff_on_finish_entry:
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

psci_aff_suspend_finish_entry:
	adr	x23, psci_afflvl_suspend_finishers

psci_aff_common_finish_entry:
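	/* ---------------------------------------------
	 * x23 holds the table of finisher handlers for
	 * the original state (power on vs. suspend).
	 * Stash the address of the common power on
	 * finish routine in x22 until a stack is
	 * available to call it on.
	 * ---------------------------------------------
	 */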
	adr	x22, psci_afflvl_power_on_finish

	/* ---------------------------------------------
	 * Exceptions should not occur at this point.
	 * Set VBAR in order to handle and report any
	 * that do occur
	 * ---------------------------------------------
	 */
	adr	x0, early_exceptions
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------
	 */
	msr	spsel, #0
	isb

	/* ---------------------------------------------
	 * Preserve this cpu's MPIDR in x19 (it survives
	 * the calls below) and use it to switch to the
	 * per-cpu coherent stack.
	 * ---------------------------------------------
	 */
	bl	read_mpidr
	mov	x19, x0
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0.
	 * ---------------------------------------------
	 */
	mov	x0, x19
	bl	get_power_on_target_afflvl
	cmp	x0, xzr
	b.lt	_panic
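
	/* ---------------------------------------------
	 * Call the common finisher stashed in x22 with
	 * x0 = mpidr, x1 = the lowest affinity level to
	 * finish, x2 = the target level determined above
	 * and x3 = the table of per-level handlers.
	 * ---------------------------------------------
	 */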
	mov	x3, x23
	mov	x2, x0
	mov	x0, x19
	mov	x1, #MPIDR_AFFLVL0
	blr	x22

	/* --------------------------------------------
	 * Give ourselves a stack allocated in Normal
	 * -IS-WBWA memory
	 * --------------------------------------------
	 */
	mov	x0, x19
	bl	platform_set_stack

	/* --------------------------------------------
	 * Zero the callee saved registers before leaving
	 * EL3 through the common el3_exit path.
	 * --------------------------------------------
	 */
	zero_callee_saved_regs
	b	el3_exit

	/* --------------------------------------------
	 * Reached when a valid affinity level to power
	 * up could not be determined; park the cpu in
	 * a tight loop.
	 * --------------------------------------------
	 */
_panic:
	b	_panic

	/* -----------------------------------------------------
	 * The following two stubs give the calling cpu a
	 * coherent stack to allow flushing of caches without
	 * suffering from stack coherency issues
	 * -----------------------------------------------------
	 */
__psci_cpu_off:
	func_prologue
	sub	sp, sp, #0x10
	stp	x19, x20, [sp, #0]
	mov	x19, sp
	bl	read_mpidr
	bl	platform_set_coherent_stack
	bl	psci_cpu_off
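	/* ---------------------------------------------
	 * On success this cpu is ready to be powered
	 * down, so enter the terminal wfi. On error
	 * switch back to the normal stack preserved in
	 * x19, restore the saved registers and return
	 * the error code to the caller.
	 * ---------------------------------------------
	 */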
	mov	x1, #PSCI_E_SUCCESS
	cmp	x0, x1
	b.eq	final_wfi
	mov	sp, x19
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x10
	func_epilogue
	ret

__psci_cpu_suspend:
	func_prologue
	sub	sp, sp, #0x20
	stp	x19, x20, [sp, #0]
	stp	x21, x22, [sp, #0x10]
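	/* ---------------------------------------------
	 * Preserve the normal stack pointer in x19 and
	 * the three entry arguments in x20-x22 across
	 * the switch to the coherent stack.
	 * ---------------------------------------------
	 */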
	mov	x19, sp
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	bl	read_mpidr
	bl	platform_set_coherent_stack
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	bl	psci_cpu_suspend
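	/* ---------------------------------------------
	 * As for cpu off: wfi on success, otherwise
	 * restore the normal stack and saved registers
	 * and return the error code to the caller.
	 * ---------------------------------------------
	 */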
	mov	x1, #PSCI_E_SUCCESS
	cmp	x0, x1
	b.eq	final_wfi
	mov	sp, x19
	ldp	x21, x22, [sp,#0x10]
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x20
	func_epilogue
	ret

	/* -----------------------------------------------------
	 * Ensure all outstanding memory accesses have completed
	 * and wait for interrupts. The cpu is expected to be
	 * powered down while in wfi, so spin if the wfi ever
	 * falls through.
	 * -----------------------------------------------------
	 */
final_wfi:
	dsb	sy
	wfi
wfi_spill:
	b	wfi_spill