/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <ea_handle.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
	 * instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro just saves x30 and unmasks
	 * Asynchronous External Aborts.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/* Save GP registers and restore them afterwards */
	bl	save_gp_registers
	bl	handle_lower_el_ea_esb
	bl	restore_gp_registers

1:
#else
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#endif
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

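	/* Identify the cause of the exception from the ESR_EL3 exception class */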
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	bl	save_gp_registers
	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
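	/*
	 * Save the handler pointer. The interrupt ID is not read at this
	 * point, so the handler is passed INTR_ID_UNAVAILABLE instead.
	 */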
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
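	/*
	 * An SError taken while running EL3 code is delegated to the platform
	 * EA handler, which is not expected to return.
	 */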
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
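	/*
	 * SError taken from a lower EL: unmask SError and delegate handling to
	 * the lower-EL asynchronous External Abort path.
	 */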
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads into x15 the pointer to the handler of that service.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	mov	x5, xzr
	mov	x6, sp

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
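	/*
	 * x16 now holds the OEN with the SMC call type bit above it; this is
	 * the index into the 'rt_svc_descs_indices' array.
	 */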

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12
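	/* SP now refers to the EL3 C runtime stack (SP_EL0 was selected above) */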

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
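	/*
	 * An AArch32 caller attempted an SMC64 call, which is not permitted.
	 * Return SMC_UNK to the caller.
	 */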
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
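	/*
	 * A runtime service descriptor with a NULL handler was selected
	 * (checked only in DEBUG builds). This is a firmware integration
	 * error, so report it and do not return.
	 */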
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler