/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
	 * ESB instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro just saves x30 and unmasks
	 * Asynchronous External Aborts.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/* Save GP registers and restore them afterwards */
	bl	save_gp_registers

	/*
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable all event
	 * counters and the cycle counter.
	 */
	bl	save_pmcr_disable_pmu

	bl	handle_lower_el_ea_esb
	bl	restore_gp_registers

1:
#else
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#endif
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

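	/* Read ESR_EL3 and extract the Exception Class (EC) of the exception */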
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	bl	save_gp_registers

	/*
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable all event
	 * counters and the cycle counter.
	 */
	bl	save_pmcr_disable_pmu

	/* Save ARMv8.3-PAuth registers and load firmware key */
#if CTX_INCLUDE_PAUTH_REGS
	bl	pauth_context_save
#endif
#if ENABLE_PAUTH
	bl	pauth_load_bl_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be caused by any of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

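	/*
	 * The interrupt ID has not been read at this point, so pass
	 * INTR_ID_UNAVAILABLE in x0 to the handler.
	 */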
	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
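	/*
	 * SError taken while executing in EL3 itself. Hand it over to the
	 * platform External Abort handler, which is not expected to return.
	 */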
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
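	/* Unmask SError and hand off to the lower EL asynchronous EA handler */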
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch32

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/* Save general purpose registers */
	bl	save_gp_registers

	/*
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable all event
	 * counters and the cycle counter.
	 */
	bl	save_pmcr_disable_pmu

	/* Save ARMv8.3-PAuth registers and load firmware key */
#if CTX_INCLUDE_PAUTH_REGS
	bl	pauth_context_save
#endif
#if ENABLE_PAUTH
	bl	pauth_load_bl_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

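	/* Set SP_EL0 to the C runtime stack value loaded into x12 earlier */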
	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK and call
	 * el3_exit() which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
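	/*
	 * An AArch32 caller attempted an SMC64 call, which is not permitted.
	 * Restore the original x30, return SMC_UNK and ERET back to the
	 * caller.
	 */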
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
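	/*
	 * The handler for this runtime service is NULL. This is a firmware
	 * integrity error, so report the exception and do not return.
	 */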
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler