/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint


vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */
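
/*
 * Note: the eight slots above mirror the standard ARMv7-A exception vector
 * layout. el3_entrypoint_common is expected to install this table as the
 * Monitor vector base (see its '_exception_vectors' argument below), so in
 * practice only the SMC, abort, IRQ and FIQ slots can be taken from here.
 */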


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table
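
	/*
	 * Note: broadly, each el3_entrypoint_common argument above gates one
	 * initialisation step; with only '_init_c_runtime' set, the macro is
	 * expected to do little more than set up the C runtime (e.g. zeroing
	 * .bss), since BL1/BL2 already handled endianness and memory.
	 */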

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */
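
	/*
	 * Both paths converge here: r0 and r1 now hold the two arguments for
	 * the platform layer below (the previous bootloader's bl_params_t and
	 * platform data pointers, or zero for RESET_TO_SP_MIN).
	 */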

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Call the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range
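
	/*
	 * Note: per the AAPCS, clean_dcache_range() takes the base address in
	 * r0 and the size in bytes in r1, hence the 'sub' instructions above
	 * computing END - START before each call.
	 */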

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb
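
	/*
	 * Note: while in Monitor mode, SCR.NS selects which banked copy of
	 * certain CP15 registers is accessed. After the SCR restore and ISB
	 * above, the SCTLR write below therefore targets the Non-secure
	 * SCTLR, as the CTX_NS_SCTLR context field suggests.
	 */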

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/*
	 * Save the SCR on the stack. r1 is pushed as well, purely to meet
	 * the 8-byte stack alignment requirement.
	 */
	push	{r0, r1}
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to the secure world */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
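
	/*
	 * Per the SMC Calling Convention, the function ID in r0 is laid out
	 * roughly as follows:
	 *   bit  31    : fast (1) / yielding (0) call
	 *   bit  30    : calling convention, SMC64 (1) / SMC32 (0)
	 *   bits 29:24 : owning entity (service) number
	 *   bits 15:0  : function number
	 * The check below rejects SMC64 function IDs, which cannot be handled
	 * on AArch32.
	 */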
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 is detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
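	/*
	 * Note: assuming the usual (smc_fid, cookie, handle, flags) signature
	 * of handle_runtime_svc(), its arguments are now in r0-r3.
	 */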
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore the SCR from the stack; r2 receives the alignment padding */
	pop	{r1, r2}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc


/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint
	 *    by programming the reset address do we need to set the CPU
	 *    endianness. In other cases, we assume this has been taken care of
	 *    by the entrypoint code.
	 *
	 *  - No need to determine the type of boot; we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs;
	 *    this notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment;
	 *    that has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs
	 * to participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform-specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * other things). Therefore we keep data caches disabled even after
	 * enabling the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single-cluster
	 * platforms, such platform-specific programming is not required for
	 * CPUs to enter coherency (they already are in it), so there is no
	 * reason to have caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
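	/*
	 * Note: on these platforms it is considered safe to turn the data
	 * cache on right away; setting the SCTLR.C bit below enables it.
	 */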
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Restore the registers from the SMC context and return to the mode
 * indicated by the saved SPSR.
 *
 * Arguments: r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
	eret
endfunc sp_min_exit