/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <ea_handle.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
	 * instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro only unmasks Asynchronous External
	 * Aborts and saves x30.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/* Save GP registers and restore them afterwards */
	bl	save_gp_registers
	mov	x0, #ERROR_EA_ESB
	mrs	x1, DISR_EL1
	bl	delegate_ea
	bl	restore_gp_registers

1:
#else
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#endif
	.endm

	/*
	 * Handle External Abort by delegating to the platform's EA handler.
	 * Once the platform handler returns, the macro exits EL3 and returns to
	 * where the abort was taken from.
	 *
	 * This macro assumes that x30 is available for use.
	 *
	 * 'abort_type' is a constant passed to the platform handler, indicating
	 * the cause of the External Abort.
	 */
	.macro handle_ea abort_type
	/* Save GP registers */
	bl	save_gp_registers

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, \abort_type
	mrs	x1, esr_el3
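
	/* Arrange for delegate_ea to return through el3_exit */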
	adr	x30, el3_exit
	b	delegate_ea
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions and External Aborts from lower ELs are handled;
	 * any other synchronous exception is reported as unhandled.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

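	/* Determine the exception cause from the Exception Class in ESR_EL3 */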
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Check for I/D aborts from lower EL */
	cmp	x30, #EC_IABORT_LOWER_EL
	b.eq	1f

	cmp	x30, #EC_DABORT_LOWER_EL
	b.ne	2f

1:
	/* Test for EA bit in the instruction syndrome */
	mrs	x30, esr_el3
	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
	handle_ea #ERROR_EA_SYNC

2:
	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	bl	save_gp_registers
	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be the result of one of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
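
	/* Save the handler address; x0-x3 are used to pass its arguments */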
	mov	x21, x0

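	/* The interrupt ID is not read here; pass INTR_ID_UNAVAILABLE instead */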
	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	b	report_unhandled_exception
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
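	/* Unmask the SError interrupt */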
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is, most commonly, the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
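	/* Unmask the SError interrupt */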
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
end_vector_entry serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads in x15 the pointer to the handler of that service.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC, and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	mov	x5, xzr
	mov	x6, sp

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
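	/* Combine the call type with the OEN to index rt_svc_descs_indices */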
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

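	/* Switch to the C runtime stack, now that SP_EL0 is selected */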
	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
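	/* Only x30 was saved on entry; restore it and return SMC_UNK */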
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler

/*
 * Delegate External Abort handling to platform's EA handler. This function
 * assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_ea
	/* Save EL3 state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Save ESR as handling might involve lower ELs, and returning back to
	 * EL3 from there would trample the original ESR.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Setup rest of arguments, and call platform External Abort handler.
	 *
	 * x0: EA reason (already in place)
	 * x1: Exception syndrome (already in place).
	 * x2: Cookie (unused for now).
	 * x3: Context pointer.
	 * x4: Flags (security state from SCR for now).
	 */
	mov	x2, xzr
	mov	x3, sp
	ubfx	x4, x4, #0, #1

	/* Switch to runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #0
	mov	sp, x5

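	/* Preserve the return address in x29 across the call; the caller has
	 * already saved x29 to the context */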
	mov	x29, x30
	bl	plat_ea_handler
	mov	x30, x29

	/* Make SP point to context */
	msr	spsel, #1

	/* Restore EL3 state */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Restore ESR_EL3 and SCR_EL3 */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

	ret
endfunc delegate_ea