/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl  tsp_vector_table

	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
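	/* Note: x0 is deliberately loaded last, as it holds
	 * the pointer to the argument structure until the
	 * other registers have been populated.
	 */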
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm

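	/* ---------------------------------------------
	 * The following two macros save and restore the
	 * context needed for an exception return from
	 * S-EL1: ELR_EL1, SPSR_EL1, the link register
	 * and x18 (the AAPCS64 platform register, which
	 * the C runtime may use). They are used by
	 * tsp_sel1_intr_entry around its calls into C
	 * code and its SMC back to EL3.
	 * ---------------------------------------------
	 */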
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

func tsp_entrypoint _align=3

#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once during primary core cold boot path.
		 *
		 * Compile time base address, required for fixup, is calculated
		 * using "pie_fixup" label present within first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE - 1)
		mov_imm	x1, (BL32_LIMIT - BL32_BASE)
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
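	/* __RW_START__/__RW_END__ are defined by the BL32
	 * linker script; inv_dcache_range takes the base
	 * address in x0 and the size in x1, hence the
	 * subtraction below.
	 */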
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
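	/* zeromem takes the base address in x0 and the
	 * length in x1; both symbols below come from the
	 * linker script.
	 */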
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
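	/* The value returned by tsp_main (in the reference
	 * TSP this is the address of tsp_vector_table) is
	 * handed to the TSPD in x1 alongside TSP_ENTRY_DONE.
	 */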
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
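	/* Note: the order of these branches is expected to
	 * mirror the tsp_vectors_t structure used by the
	 * TSPD, so entries must not be reordered on their
	 * own.
	 */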
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. set up
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
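	/* x0 is the flags argument of bl32_plat_enable_mmu;
	 * passing zero (i.e. not setting DISABLE_DCACHE)
	 * enables the D-cache together with the MMU.
	 */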
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling an S-EL1
	 * Interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic number
	 * which indicates this. The TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non-secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
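	/* Unmask IRQ and FIQ for the duration of the
	 * handler so that this Yielding SMC can be
	 * preempted, then mask them again before
	 * returning to the TSPD.
	 */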
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
	 * SMC. It could be on behalf of non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exceptions masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the pre-empted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry