/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
	 * instruction. When an error is thus synchronized, the handling is
	 * delegated to platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro synchronizes pending errors using
	 * a DSB, unmasks Asynchronous External Aborts and saves X30 before
	 * setting the flag CTX_IS_IN_EL3.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

	bl	handle_lower_el_ea_esb

	/* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
	bl	restore_gp_pmcr_pauth_regs
1:
#else
	/*
	 * For SoCs which do not implement RAS, use DSB as a barrier to
	 * synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/*
	 * Refer Note 1. No need to restore X30 as both handle_sync_exception
	 * and handle_interrupt_exception macros, which follow this macro, modify
	 * X30 anyway.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
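	/* Flag that execution is now in EL3 (refer Note 1) */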
	mov 	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy
#endif
	.endm

#if !RAS_EXTENSION
	/*
	 * Note 1: The explicit DSB at the entry of various exception vectors
	 * for handling exceptions from lower ELs can inadvertently trigger an
	 * SError exception in EL3 due to pending asynchronous aborts in lower
	 * ELs. This will end up being handled by serror_sp_elx which will
	 * ultimately panic and die.
	 * The way to work around this is to update a flag to indicate if the
	 * exception truly came from EL3. This flag is allocated in the
	 * cpu_context structure and located at offset "CTX_EL3STATE_OFFSET +
	 * CTX_IS_IN_EL3". This is not a bullet-proof solution to the problem at
	 * hand because we assume the instructions following the "isb" that help
	 * to update the flag execute without causing further exceptions.
	 */

	/* ---------------------------------------------------------------------
	 * This macro handles Asynchronous External Aborts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_async_ea
	/*
	 * Use a barrier to synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/* Refer Note 1 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov 	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy

	b	handle_lower_el_async_ea
	.endm

	/*
	 * This macro checks if the exception was taken due to SError in EL3 or
	 * because of pending asynchronous external aborts from lower EL that got
	 * triggered due to explicit synchronization in EL3. Refer Note 1.
	 */
	.macro check_if_serror_from_EL3
	/* Assumes SP_EL3 on entry */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	cbnz	x30, exp_from_EL3

	/* Handle asynchronous external abort from lower EL */
	b	handle_lower_el_async_ea

exp_from_EL3:
	/* Jump to plat_handle_el3_ea which does not return */
	.endm
#endif

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
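	/* Keep a reference to the saved context, i.e. SP_EL3, in x20 */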
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if !RAS_EXTENSION
	check_if_serror_from_EL3
#endif
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register state
	 * can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register state
	 * can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to runtime stacks
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK and call
	 * el3_exit() which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the ERET
	 * to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc smc_handler

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only reasonable course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
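	/* Print the ISS value; x5 holds the number of bits to print */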
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */