runtime_exceptions.S

/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
	 * ESB instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro just saves x30 and unmasks
	 * asynchronous External Aborts (SError interrupts).
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

	bl	handle_lower_el_ea_esb

	/* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
	bl	restore_gp_pmcr_pauth_regs
1:
#else
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#endif
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported; any other synchronous exception
	 * is treated as an External Abort.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

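	/* Identify the exception by reading the exception class (EC) field of ESR_EL3 */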
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
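	/*
	 * Preserve the handler address in x21; x0-x3 are about to be loaded
	 * with the handler's arguments. The interrupt ID is not read at EL3,
	 * so INTR_ID_UNAVAILABLE is passed instead.
	 */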
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


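	/*
	 * The AArch64 vector table below must be 2KB-aligned and consists of
	 * four groups of four 128-byte entries; the vector_base and
	 * vector_entry/end_vector_entry macros provide the required alignment
	 * and per-entry size checks.
	 */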
vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
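	/* Temporarily preserve x29/x30 on the stack while the exception class is examined */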
	stp x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
	apply_at_speculative_wa
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	apply_at_speculative_wa
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees up some general purpose registers to perform the
	 * remaining tasks: finding the runtime service handler that is the
	 * target of the SMC and switching to the runtime stack (SP_EL0) before
	 * calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
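	/*
	 * x0 holds the SMC function ID and x1-x4 hold its arguments; they are
	 * forwarded unchanged to the runtime service handler below.
	 */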
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
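	/*
	 * x16 now holds (call type << FUNCID_OEN_WIDTH) | OEN, the index used
	 * to look up the descriptor of the runtime service that owns this SMC.
	 */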

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
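	/*
	 * An AArch32 caller used an SMC64 function ID, which the SMC Calling
	 * Convention prohibits. Return SMC_UNK directly to the caller.
	 */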
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc smc_handler

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */