/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <psci.h>

	.globl	psci_aff_on_finish_entry
	.globl	psci_aff_suspend_finish_entry
	.globl	__psci_cpu_off
	.globl	__psci_cpu_suspend
	.globl	psci_power_down_wfi

	/* -----------------------------------------------------
	 * This cpu has been physically powered up. Depending
	 * upon whether it was resumed from suspend or simply
	 * turned on, call the common power on finisher with
	 * the appropriate set of handlers. For ease, the
	 * finisher is called on coherent stacks. This allows
	 * the cluster/cpu finishers to enter coherency and
	 * enable the mmu without running into stack coherency
	 * issues. We switch back to normal stacks once all
	 * this is done.
	 * -----------------------------------------------------
	 */
func psci_aff_on_finish_entry
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

psci_aff_suspend_finish_entry:
	adr	x23, psci_afflvl_suspend_finishers

psci_aff_common_finish_entry:
	adr	x22, psci_afflvl_power_on_finish
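	/* ---------------------------------------------
	 * At this point x23 holds the table of affinity
	 * level finishers selected by the entry point
	 * above (power on vs. suspend) and x22 the
	 * common power on finish routine invoked via
	 * 'blr x22' further below.
	 * ---------------------------------------------
	 */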

	/* ---------------------------------------------
	 * Initialise the pcpu cache pointer for the CPU
	 * ---------------------------------------------
	 */
	bl	init_cpu_data_ptr

	/* ---------------------------------------------
	 * Set the exception vectors
	 * ---------------------------------------------
	 */
	adr	x0, runtime_exceptions
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------
	 */
	msr	spsel, #0

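	/* ---------------------------------------------
	 * Switch to the cpu specific coherent stack;
	 * the current mpidr is passed in x0 to the
	 * platform routine.
	 * ---------------------------------------------
	 */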
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	get_power_on_target_afflvl
	cmp	x0, xzr
	b.lt	_panic
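	/* ---------------------------------------------
	 * Set up the arguments for the power on finish
	 * routine: x0 = mpidr, x1 = lowest affinity
	 * level to operate on, x2 = target affinity
	 * level returned above, x3 = table of level
	 * specific finishers selected at entry.
	 * ---------------------------------------------
	 */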
	mov	x3, x23
	mov	x2, x0
	mov	x1, #MPIDR_AFFLVL0
	mrs	x0, mpidr_el1
	blr	x22

	/* --------------------------------------------
	 * Give ourselves a stack allocated in
	 * Normal-IS-WBWA memory
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	b	el3_exit
_panic:
	b	_panic

	/* -----------------------------------------------------
	 * The following two stubs give the calling cpu a
	 * coherent stack to allow flushing of caches without
	 * suffering from stack coherency issues
	 * -----------------------------------------------------
	 */
func __psci_cpu_off
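	/* ---------------------------------------------
	 * Save the callee-saved registers used below
	 * and stash the normal memory stack pointer in
	 * x19 so that it can be restored after the call
	 * made on the coherent stack.
	 * ---------------------------------------------
	 */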
	func_prologue
	sub	sp, sp, #0x10
	stp	x19, x20, [sp, #0]
	mov	x19, sp
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack
	bl	psci_cpu_off
	mov	sp, x19
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x10
	func_epilogue
	ret

func __psci_cpu_suspend
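	/* ---------------------------------------------
	 * Preserve the caller's arguments in x20-x22
	 * and the normal memory stack pointer in x19.
	 * The arguments are moved back into x0-x2 after
	 * the switch to the coherent stack and the
	 * original stack pointer is restored on return
	 * from psci_cpu_suspend.
	 * ---------------------------------------------
	 */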
	func_prologue
	sub	sp, sp, #0x20
	stp	x19, x20, [sp, #0]
	stp	x21, x22, [sp, #0x10]
	mov	x19, sp
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	bl	psci_cpu_suspend
	mov	sp, x19
	ldp	x21, x22, [sp,#0x10]
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x20
	func_epilogue
	ret

	/* --------------------------------------------
	 * This function is called to indicate to the
	 * power controller that it is safe to power
	 * down this cpu. The cpu is not expected to
	 * exit the wfi; it will be released from
	 * reset on the next power up. 'wfi_spill' is
	 * used to catch any erroneous exit from wfi.
	 * --------------------------------------------
	 */
func psci_power_down_wfi
	dsb	sy		// ensure write buffer empty
	wfi
wfi_spill:
	b	wfi_spill