/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
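
/*
 * Vectors and helpers for the CVE-2017-5715 (branch target injection)
 * workaround on CPUs where the AArch32 BPIALL instruction invalidates
 * the branch predictor: on each exception entry to EL3 from a lower EL,
 * execution drops briefly into an AArch32 S-EL1 stub that issues
 * BPIALL, then returns to EL3 and resumes with the original vector.
 */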

#include <arch.h>
#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <context.h>

	.globl	wa_cve_2017_5715_bpiall_vbar

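/*
 * EMIT_BPIALL is the AArch32 encoding of "mcr p15, 0, r0, c7, c5, 6"
 * (BPIALL) and EMIT_SMC the encoding of "smc #0"; both are emitted as
 * data words into the S-EL1 stub below.  ESR_EL3_A64_SMC0 is the
 * syndrome of an SMC #0 taken from AArch64 state (EC = 0x17, IL = 1).
 */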
#define EMIT_BPIALL		0xee070fd5
#define EMIT_SMC		0xe1600070
#define ESR_EL3_A64_SMC0	0x5e000000

	.macro	apply_cve_2017_5715_wa _from_vector
	/*
	 * Save register state to enable a call to AArch32 S-EL1 and return
	 * Identify the original calling vector in w2 (==_from_vector)
	 * Use w3-w6 for additional register state preservation while in S-EL1
	 */
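	/*
	 * _from_vector is a one-hot value naming the original vector:
	 * bit 0 = Sync, bit 1 = IRQ, bit 2 = FIQ, bit 3 = SError.  It is
	 * tested with tbz/tbnz once the workaround has run.
	 */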

	/* Save GP regs */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/* Identify the original exception vector */
	mov	w2, \_from_vector

	/* Preserve 32-bit system registers in GP registers through the workaround */
	mrs	x3, esr_el3
	mrs	x4, spsr_el3
	mrs	x5, scr_el3
	mrs	x6, sctlr_el1
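	/* sctlr_el1 is saved since the stub entry below overwrites it */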

	/*
	 * Preserve LR and ELR_EL3 registers in the GP regs context.
	 * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
	 * through the workaround. This is OK because at this point the
	 * current state for this context's SP_EL0 is in the live system
	 * register, which is unmodified by the workaround.
	 */
	mrs	x7, elr_el3
	stp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/*
	 * Load system registers for entry to S-EL1.
	 */

	/* Mask all interrupts and set AArch32 Supervisor mode */
	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)

	/* Switch EL3 exception vectors while the workaround is executing. */
	adr	x9, wa_cve_2017_5715_bpiall_ret_vbar

	/* Setup SCTLR_EL1 with MMU off and I$ on */
	ldr	x10, stub_sel1_sctlr

	/* Land at the S-EL1 workaround stub */
	adr	x11, aarch32_stub

	/*
	 * Setting SCR_EL3 to all zeroes means that the NS, RW
	 * and SMD bits are configured as expected.
	 */
	msr	scr_el3, xzr
	msr	spsr_el3, x8
	msr	vbar_el3, x9
	msr	sctlr_el1, x10
	msr	elr_el3, x11

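	/* Enter the AArch32 stub at S-EL1; it issues BPIALL, then SMC #0 */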
	eret
	.endm

	/* ---------------------------------------------------------------------
	 * This vector table is used at runtime to enter the workaround at
	 * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions.  If the workaround
	 * is not enabled, the existing runtime exception vector table is used.
	 * ---------------------------------------------------------------------
	 */
vector_base wa_cve_2017_5715_bpiall_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_sp_el0
	b	sync_exception_sp_el0
	nop	/* to force 8 byte alignment for the following stub */

	/*
	 * Since each vector table entry is 128 bytes, we can store the
	 * stub context in the unused space to minimize memory footprint.
	 */
stub_sel1_sctlr:
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT

aarch32_stub:
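	/* BPIALL to invalidate the branch predictor, then SMC #0 back to EL3 */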
	.word	EMIT_BPIALL
	.word	EMIT_SMC

	check_vector_size bpiall_sync_exception_sp_el0

vector_entry bpiall_irq_sp_el0
	b	irq_sp_el0
	check_vector_size bpiall_irq_sp_el0

vector_entry bpiall_fiq_sp_el0
	b	fiq_sp_el0
	check_vector_size bpiall_fiq_sp_el0

vector_entry bpiall_serror_sp_el0
	b	serror_sp_el0
	check_vector_size bpiall_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_sp_elx
	b	sync_exception_sp_elx
	check_vector_size bpiall_sync_exception_sp_elx

vector_entry bpiall_irq_sp_elx
	b	irq_sp_elx
	check_vector_size bpiall_irq_sp_elx

vector_entry bpiall_fiq_sp_elx
	b	fiq_sp_elx
	check_vector_size bpiall_fiq_sp_elx

vector_entry bpiall_serror_sp_elx
	b	serror_sp_elx
	check_vector_size bpiall_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_aarch64
	apply_cve_2017_5715_wa 1
	check_vector_size bpiall_sync_exception_aarch64

vector_entry bpiall_irq_aarch64
	apply_cve_2017_5715_wa 2
	check_vector_size bpiall_irq_aarch64

vector_entry bpiall_fiq_aarch64
	apply_cve_2017_5715_wa 4
	check_vector_size bpiall_fiq_aarch64

vector_entry bpiall_serror_aarch64
	apply_cve_2017_5715_wa 8
	check_vector_size bpiall_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_aarch32
	apply_cve_2017_5715_wa 1
	check_vector_size bpiall_sync_exception_aarch32

vector_entry bpiall_irq_aarch32
	apply_cve_2017_5715_wa 2
	check_vector_size bpiall_irq_aarch32

vector_entry bpiall_fiq_aarch32
	apply_cve_2017_5715_wa 4
	check_vector_size bpiall_fiq_aarch32

vector_entry bpiall_serror_aarch32
	apply_cve_2017_5715_wa 8
	check_vector_size bpiall_serror_aarch32

	/* ---------------------------------------------------------------------
	 * This vector table is used while the workaround is executing.  It
	 * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
	 * workaround stubs to enter EL3 from S-EL1.  It restores the previous
	 * EL3 state before proceeding with the normal runtime exception vector.
	 * ---------------------------------------------------------------------
	 */
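	/*
	 * Only the "Lower EL using AArch32" Sync entry does real work here:
	 * the SMC #0 issued by the S-EL1 stub is the only exception expected
	 * while this table is installed.
	 */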
vector_base wa_cve_2017_5715_bpiall_ret_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_sp_el0
	b	report_unhandled_exception
	check_vector_size bpiall_ret_sync_exception_sp_el0

vector_entry bpiall_ret_irq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_irq_sp_el0

vector_entry bpiall_ret_fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_fiq_sp_el0

vector_entry bpiall_ret_serror_sp_el0
	b	report_unhandled_exception
	check_vector_size bpiall_ret_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_sp_elx
	b	report_unhandled_exception
	check_vector_size bpiall_ret_sync_exception_sp_elx

vector_entry bpiall_ret_irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_irq_sp_elx

vector_entry bpiall_ret_fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_fiq_sp_elx

vector_entry bpiall_ret_serror_sp_elx
	b	report_unhandled_exception
	check_vector_size bpiall_ret_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_aarch64
	b	report_unhandled_exception
	check_vector_size bpiall_ret_sync_exception_aarch64

vector_entry bpiall_ret_irq_aarch64
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_irq_aarch64

vector_entry bpiall_ret_fiq_aarch64
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_fiq_aarch64

vector_entry bpiall_ret_serror_aarch64
	b	report_unhandled_exception
	check_vector_size bpiall_ret_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_aarch32
	/*
	 * w2 indicates which SEL1 stub was run and thus which original vector was used
	 * w3-w6 contain saved system register state (esr_el3 in w3)
	 * Restore LR and ELR_EL3 register state from the GP regs context
	 */
	ldp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Apply the restored system register state */
	msr	esr_el3, x3
	msr	spsr_el3, x4
	msr	scr_el3, x5
	msr	sctlr_el1, x6
	msr	elr_el3, x7

	/*
	 * Workaround is complete, so swap VBAR_EL3 to point
	 * to workaround entry table in preparation for subsequent
	 * Sync/IRQ/FIQ/SError exceptions.
	 */
	adr	x0, wa_cve_2017_5715_bpiall_vbar
	msr	vbar_el3, x0

	/*
	 * Restore all GP regs except x2 and x3 (esr).  The value in x2
	 * indicates the type of the original exception.
	 */
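	/* The handler exit paths below reload x2/x3 just before branching */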
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/* Fast path Sync exceptions.  Static predictor will fall through. */
	tbz	w2, #0, workaround_not_sync

	/*
	 * Check if SMC is coming from A64 state on #0
	 * with W0 = SMCCC_ARCH_WORKAROUND_1
	 *
	 * This sequence evaluates as:
	 *    (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
	 * allowing use of a single branch operation
	 */
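	/* SMCCC_ARCH_WORKAROUND_1 (0x80008000) is a valid logical immediate */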
	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_1
	cmp	w0, w2
	mov_imm	w2, ESR_EL3_A64_SMC0
	ccmp	w3, w2, #0, eq
	/* Static predictor will predict a fall through */
	bne	1f
	eret
1:
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	sync_exception_aarch64
	check_vector_size bpiall_ret_sync_exception_aarch32

vector_entry bpiall_ret_irq_aarch32
	b	report_unhandled_interrupt

	/*
	 * Post-workaround fan-out for non-sync exceptions
	 */
workaround_not_sync:
	tbnz	w2, #3, bpiall_ret_serror
	tbnz	w2, #2, bpiall_ret_fiq
	/* IRQ */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	irq_aarch64

bpiall_ret_fiq:
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	fiq_aarch64

bpiall_ret_serror:
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	serror_aarch64
	check_vector_size bpiall_ret_irq_aarch32

vector_entry bpiall_ret_fiq_aarch32
	b	report_unhandled_interrupt
	check_vector_size bpiall_ret_fiq_aarch32

vector_entry bpiall_ret_serror_aarch32
	b	report_unhandled_exception
	check_vector_size bpiall_ret_serror_aarch32