/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
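
/*
 * Monitor mode vector table. It is installed in both VBAR and MVBAR
 * during cold boot, so other than the SMC (Syscall) entry, any
 * exception taken while in SP_MIN lands in the platform panic handler.
 */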

func sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */
endfunc sp_min_vector_table

func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/* Save SCR in stack */
	push	{r0}
	and	r3, r0, #SCR_NS_BIT		/* flags */
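
	/*
	 * r3 carries the caller's security state (the SCR.NS bit), which is
	 * passed as the flags argument to the runtime service handler.
	 */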

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 call was issued (SMCCC function ID bit 30) */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 is detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
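	/* r0 = smc_fid, r1 = cookie, r2 = handle, r3 = flags, per the AAPCS */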
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore SCR from stack */
	pop	{r1}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc

/*
 * The Cold boot/Reset entrypoint for SP_MIN
 */
func sp_min_entrypoint

	/*
	 * The caches and TLBs are disabled at reset. If any implementation
	 * allows the caches/TLB to be hit while they are disabled, ensure
	 * that they are invalidated here
	 */

	/* Make sure we are in Secure Mode */
	ldcopr	r0, SCR
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Switch to Monitor mode, the mode SP_MIN executes in */
	cps	#MODE32_mon
	isb

	/*
	 * Set sane values for the Non-secure SCTLR as well. SCTLR is banked
	 * between the Secure and Non-secure worlds, so the SCR.NS bit is set
	 * temporarily in order to program the Non-secure copy; the original
	 * SCR value is kept in r1 and restored afterwards.
	 */
	ldr	r0, =(SCTLR_RES1)
	ldcopr	r1, SCR
	orr	r2, r1, #SCR_NS_BIT
	stcopr	r2, SCR
	isb

	ldcopr	r2, SCTLR
	orr	r0, r0, r2
	stcopr	r0, SCTLR
	isb

	stcopr	r1, SCR
	isb

	/*
	 * Set the CPU endianness before doing anything that might involve
	 * memory reads or writes.
	 */
	ldcopr	r0, SCTLR
	bic	r0, r0, #SCTLR_EE_BIT
	stcopr	r0, SCTLR
	isb

	/* Run the CPU Specific Reset handler */
	bl	reset_handler

	/*
	 * Enable the instruction cache and data access
	 * alignment checks
	 */
	ldcopr	r0, SCTLR
	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* Set both VBAR and MVBAR to the SP_MIN vector table */
	ldr	r0, =sp_min_vector_table
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/*
	 * Enable the SIF bit to disable instruction fetches
	 * from Non-secure memory.
	 */
	ldcopr	r0, SCR
	orr	r0, r0, #SCR_SIF_BIT
	stcopr	r0, SCR

	/*
	 * Enable the SError (asynchronous abort) exception now that the
	 * exception vectors have been set up; cpsie a clears the CPSR.A
	 * mask bit.
	 */
	cpsie   a
	isb

	/* Enable Non-secure access to the Advanced SIMD registers (CP10/CP11) */
	ldcopr	r0, NSACR
	bic	r0, r0, #NSASEDIS_BIT
	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
	stcopr	r0, NSACR
	isb

	/*
	 * Enable access to Advanced SIMD, Floating point and to the Trace
	 * functionality as well.
	 */
	ldcopr	r0, CPACR
	bic	r0, r0, #ASEDIS_BIT
	bic	r0, r0, #TRCDIS_BIT
	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
	stcopr	r0, CPACR
	isb

	/* Enable the FP/Advanced SIMD unit via the FPEXC enable bit */
	vmrs	r0, FPEXC
	orr	r0, r0, #FPEXC_EN_BIT
	vmsr	FPEXC, r0

	/*
	 * Detect whether this is a warm or a cold boot.
	 * plat_get_my_entrypoint() returns zero for a cold boot and the
	 * warm boot entrypoint otherwise.
	 */
	bl	plat_get_my_entrypoint
	cmp	r0, #0
	/* If a warm boot was detected, jump to the warm boot entrypoint */
	bxne	r0

	/* Setup C runtime stack */
	bl	plat_set_my_stack

	/* Perform platform specific memory initialization */
	bl	platform_mem_init

	/* Initialize the C Runtime Environment */

	/*
	 * Invalidate the RW memory used by SP_MIN image. This includes
	 * the data and NOBITS sections. This is done to safeguard against
	 * possible corruption of this memory by dirty cache lines in a system
	 * cache as a result of use by an earlier boot loader stage.
	 */
	ldr	r0, =__RW_START__
	ldr	r1, =__RW_END__
	sub	r1, r1, r0
	bl	inv_dcache_range

	/* Zero-initialise the .bss section */
	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	/* Zero-initialise the coherent RAM region */
	ldr	r0, =__COHERENT_RAM_START__
	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* Perform platform specific early arch. setup */
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context
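	/* r0 now points to the Non-secure cpu_context */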

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the Non-secure SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint

/*
 * The Warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint

	/* Setup C runtime stack */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled, so
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but the D$ is
	 * disabled, so used stack memory is guaranteed
	 * to be visible immediately after the MMU is
	 * enabled. Enabling the DCache at the same
	 * time as the MMU can lead to speculatively
	 * fetched and possibly stale stack memory
	 * being read from other caches. This can lead
	 * to coherency issues.
	 * --------------------------------------------
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context
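	/* r0 now points to the Non-secure cpu_context */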

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the Non-secure SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Function to restore the registers from the SMC context and return
 * to the mode indicated by the SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
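	/* eret restores the CPSR from SPSR_mon and returns to the LR */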
	eret
endfunc sp_min_exit