/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

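	/* Extract the exception class (EC) field from ESR_EL3 */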
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be due to one of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
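	/* Stash the handler address; x0-x3 are set up below as its arguments */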
	mov	x21, x0

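	/* The interrupt ID is not read at this point; pass INTR_ID_UNAVAILABLE */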
	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0

vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * Most commonly, this exception vector is the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	/*
	 * SError exceptions from lower ELs are not currently supported.
	 * Report their occurrence.
	 */
	b	report_unhandled_exception
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * Most commonly, this exception vector is the entry point for SMCs and
	 * for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	/*
	 * SError exceptions from lower ELs are not currently supported.
	 * Report their occurrence.
	 */
	b	report_unhandled_exception
	check_vector_size serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads in x15 the pointer to the handler of that service.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

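	/* As noted above, x5 holds the cookie (unused) and x6 the context pointer */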
	mov	x5, xzr
	mov	x6, sp

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
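	/* Fold the SMC type bit in above the OEN to form the descriptor index */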
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

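	/* Switch to the C runtime stack (SP_EL0 was selected above) */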
	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
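	/* In a debug build, catch a NULL handler before branching to it */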
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
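	/* An SMC64 call from an AArch32 caller is not allowed; return SMC_UNK */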
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler