/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables_defs.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
	.globl	sp_min_handle_smc
	.globl	sp_min_handle_fiq

	.macro route_fiq_to_sp_min reg
		/* -----------------------------------------------------
		 * FIQs are secure interrupts trapped by the Monitor and
		 * the Non-secure world is not allowed to mask them.
		 * -----------------------------------------------------
		 */
		ldcopr	\reg, SCR
		orr	\reg, \reg, #SCR_FIQ_BIT
		bic	\reg, \reg, #SCR_FW_BIT
		stcopr	\reg, SCR
	.endm

	.macro clrex_on_monitor_entry
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 architectures need to clear the exclusive access when
	 * entering Monitor mode.
	 */
	clrex
#endif
	.endm

vector_base sp_min_vector_table
	b	sp_min_entrypoint	/* Reset */
	b	plat_panic_handler	/* Undef */
	b	sp_min_handle_smc	/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	sp_min_handle_fiq	/* FIQ */


/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR, including the CPU endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r4
#endif

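	/* ---------------------------------------------------------------------
	 * Perform early platform setup and platform specific early
	 * architectural setup (e.g. MMU setup), using the arguments in r0/r1.
	 * ---------------------------------------------------------------------
	 */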
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Get a pointer to the `smc_ctx_t` of the next world to enter */
	bl	smc_get_next_ctx

	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func sp_min_handle_smc
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

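	/* Save the general purpose register state into the `smc_ctx_t` */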
	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/*
	 * `sp` still points to `smc_ctx_t`. Save it to a register
	 * and restore the C runtime stack pointer to `sp`.
	 */
	mov	r2, sp				/* handle */
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	ldr	r0, [r2, #SMC_CTX_SCR]
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset,
	 * so set it to 1 as ARM has deprecated the use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f
	/* Not an SMC32. Return an error back to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	sp_min_exit
1:
	/* SMC32 is detected */
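	/*
	 * Dispatch the call: r0 = smc_fid, r1 = cookie,
	 * r2 = handle (`smc_ctx_t`), r3 = flags.
	 */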
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc

	/* `r0` points to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_handle_smc

/*
 * Secure interrupt (FIQ) handling function for SP_MIN.
 */
func sp_min_handle_fiq
#if !SP_MIN_WITH_SECURE_FIQ
	b plat_panic_handler
#else
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
	str	lr, [sp, #SMC_CTX_LR_MON]

	smcc_save_gp_mode_regs

	clrex_on_monitor_entry

	/* load run-time stack */
	mov	r2, sp
	ldr	sp, [r2, #SMC_CTX_SP_MON]

	/* Switch to Secure Mode */
	ldr	r0, [r2, #SMC_CTX_SCR]
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/*
	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset,
	 * so set it to 1 as ARM has deprecated the use of PMCR.LC=0.
	 */
	ldcopr	r0, PMCR
	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
	stcopr	r0, PMCR

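	/*
	 * Preserve the `smc_ctx_t` pointer (r2) and the flags (r3) across the
	 * call to the C handler; the context pointer is popped back into r0
	 * for sp_min_exit.
	 */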
	push	{r2, r3}
	bl	sp_min_fiq
	pop	{r0, r3}

	b	sp_min_exit
#endif
endfunc sp_min_handle_fiq

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *    programming the reset address do we need to initialise the SCTLR.
	 *    In other cases, we assume this has been taken care of by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
	route_fiq_to_sp_min r0
#endif

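	/*
	 * Turn the data cache on straight away on platforms where, as noted
	 * above, coherency does not depend on platform specific programming.
	 */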
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_C_BIT
	stcopr	r0, SCTLR
	isb
#endif

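	/*
	 * Complete the warm boot sequence in C, then fetch the SMC context
	 * describing the world to return to.
	 */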
	bl	sp_min_warm_boot
	bl	smc_get_next_ctx
	/* r0 points to `smc_ctx_t` */
	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Restore the registers from the SMC context and return to the mode
 * indicated by the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
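	/*
	 * `monitor_exit` is expected to restore the register state from the
	 * `smc_ctx_t` pointed to by r0 and perform the exception return.
	 */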
	monitor_exit
endfunc sp_min_exit