/*
 * Copyright (c) 2013, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
#include <asm_macros.S>

	.globl	psci_aff_on_finish_entry
	.globl	psci_aff_suspend_finish_entry
	.globl	__psci_cpu_off
	.globl	__psci_cpu_suspend


	.section	.text, "ax"; .align 3

	/* -----------------------------------------------------
	 * This cpu has been physically powered up. Depending
	 * upon whether it was resumed from suspend or simply
	 * turned on, call the common power on finisher with
	 * the handlers (chosen depending upon original state).
	 * For ease, the finisher is called with coherent
	 * stacks. This allows the cluster/cpu finishers to
	 * enter coherency and enable the mmu without running
	 * into issues. We switch back to normal stacks once
	 * all this is done.
	 * -----------------------------------------------------
	 */
psci_aff_on_finish_entry:
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

psci_aff_suspend_finish_entry:
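	/* -----------------------------------------------------
	 * Load the suspend specific handler table and fall
	 * through to the common finish path below.
	 * -----------------------------------------------------
	 */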
	adr	x23, psci_afflvl_suspend_finishers

psci_aff_common_finish_entry:
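	/* -----------------------------------------------------
	 * x22 holds the generic power on finish routine while
	 * x23 (set up by the callers above) points to the table
	 * of level specific handlers. The mpidr is preserved in
	 * x19 so that the normal stack can be set up for this
	 * cpu after the finishers have run.
	 * -----------------------------------------------------
	 */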
	adr	x22, psci_afflvl_power_on_finish
	bl	read_mpidr
	mov	x19, x0
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0.
	 * ---------------------------------------------
	 */
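	/* ---------------------------------------------
	 * The finish routine takes x0: mpidr, x1: lowest
	 * affinity level, x2: highest affinity level and
	 * x3: table of level specific finishers. Its
	 * return value, the context id, is preserved in
	 * x21 across the switch back to the normal stack.
	 * ---------------------------------------------
	 */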
	bl	get_max_afflvl
	mov	x3, x23
	mov	x2, x0
	mov	x0, x19
	mov	x1, #MPIDR_AFFLVL0
	blr	x22
	mov	x21, x0

	/* --------------------------------------------
	 * Give ourselves a stack allocated in
	 * Normal IS-WBWA memory
	 * --------------------------------------------
	 */
	mov	x0, x19
	bl	platform_set_stack

	/* --------------------------------------------
	 * Restore the context id value
	 * --------------------------------------------
	 */
	mov	x0, x21

	/* --------------------------------------------
	 * Jump back to the non-secure world assuming
	 * that the elr and spsr setup has been done
	 * by the finishers
	 * --------------------------------------------
	 */
	eret
_panic:
	b	_panic

	/* -----------------------------------------------------
	 * The following two stubs give the calling cpu a
	 * coherent stack to allow flushing of caches without
	 * suffering from stack coherency issues
	 * -----------------------------------------------------
	 */
__psci_cpu_off:
	func_prologue
	sub	sp, sp, #0x10
	stp	x19, x20, [sp, #0]
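	/* -----------------------------------------------------
	 * Preserve the normal stack pointer in x19 so that it
	 * can be restored if psci_cpu_off returns an error.
	 * -----------------------------------------------------
	 */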
	mov	x19, sp
	bl	read_mpidr
	bl	platform_set_coherent_stack
	bl	psci_cpu_off
	mov	x1, #PSCI_E_SUCCESS
	cmp	x0, x1
	b.eq	final_wfi
	mov	sp, x19
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x10
	func_epilogue
	ret

__psci_cpu_suspend:
	func_prologue
	sub	sp, sp, #0x20
	stp	x19, x20, [sp, #0]
	stp	x21, x22, [sp, #0x10]
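	/* -----------------------------------------------------
	 * Preserve the normal stack pointer in x19 and the
	 * suspend arguments in x20-x22 across the switch to
	 * the coherent stack.
	 * -----------------------------------------------------
	 */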
	mov	x19, sp
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	bl	read_mpidr
	bl	platform_set_coherent_stack
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	bl	psci_cpu_suspend
	mov	x1, #PSCI_E_SUCCESS
	cmp	x0, x1
	b.eq	final_wfi
	mov	sp, x19
	ldp	x21, x22, [sp,#0x10]
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x20
	func_epilogue
	ret

final_wfi:
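	/* -----------------------------------------------------
	 * The dsb ensures that all outstanding memory accesses
	 * complete before this cpu enters the wfi. The loop
	 * below catches a spurious wake up from the wfi.
	 * -----------------------------------------------------
	 */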
	dsb	sy
	wfi
wfi_spill:
	b	wfi_spill