/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>
	/* -------------------------------------------------
	 * CVE-2017-5715 mitigation
	 *
	 * Flush the indirect branch predictor and RSB on
	 * entry to EL3 by issuing a newly added instruction
	 * for Denver CPUs.
	 *
	 * To achieve this without performing any branch
	 * instruction, a per-cpu vbar is installed which
	 * executes the workaround and then branches off to
	 * the corresponding vector entry in the main vector
	 * table.
	 * -------------------------------------------------
	 */
	.globl	workaround_bpflush_runtime_exceptions

vector_base workaround_bpflush_runtime_exceptions

	/*
	 * Flush the branch predictor state without branching.
	 * Clobbers no registers: x0/x1 are stashed in the saved
	 * GP-register area of the EL3 context frame (SP_EL3 points
	 * at the context on EL3 entry) and restored before exit.
	 */
	.macro	apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* -------------------------------------------------
	 * A new write-only system register where a write of
	 * 1 to bit 0 will cause the indirect branch predictor
	 * and RSB to be flushed.
	 *
	 * A write of 0 to bit 0 will be ignored. A write of
	 * 1 to any other bit will cause an MCA.
	 * -------------------------------------------------
	 */
	mov	x0, #1
	msr	s3_0_c15_c0_6, x0
	isb

	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 *
	 * Exceptions taken from EL3 itself: no branch-predictor flush is
	 * needed (we are already in EL3), so each entry tail-branches
	 * straight to the corresponding slot of the main vector table.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
	b	irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
	b	serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 *
	 * Exceptions taken from EL3 with SP_EL3: again no flush required,
	 * just forward to the main vector table entries.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
	b	irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
	b	serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 *
	 * Exceptions from a lower EL: apply the branch-predictor/RSB flush
	 * before handing over to the main vector table.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
	apply_workaround
	b	irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
	apply_workaround
	b	serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 *
	 * Exceptions from a lower EL running AArch32: apply the flush, then
	 * forward to the main vector table.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
	apply_workaround
	b	irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
	apply_workaround
	b	serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32
	/* Exported so platform code can disable the DCO before power down */
	.global	denver_disable_dco
	/* ---------------------------------------------
	 * Disable debug interfaces
	 *
	 * Sets OSDLR_EL1.DLK (bit 0) to engage the OS
	 * Double Lock, quiescing the external debug
	 * interface ahead of core power down.
	 * Clobbers: x0.
	 * ---------------------------------------------
	 */
func denver_disable_ext_debug
	mov	x0, #1
	msr	osdlr_el1, x0
	isb
	dsb	sy
	ret
endfunc denver_disable_ext_debug

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 *
	 * Sets this core's bit in the implementation-defined
	 * DCO control register (s3_0_c15_c0_2).
	 * Clobbers: x0, x1, x3.
	 * ----------------------------------------------------
	 */
func denver_enable_dco
	/* Preserve the return address across the helper call */
	mov	x3, x30
	bl	plat_my_core_pos

	/* x1 = 1 << core_pos: per-core DCO enable bit */
	mov	x1, #1
	lsl	x1, x1, x0
	msr	s3_0_c15_c0_2, x1

	mov	x30, x3
	ret
endfunc denver_enable_dco

	/* ----------------------------------------------------
	 * Disable dynamic code optimizer (DCO)
	 *
	 * Writes this core's disable bit (bits 31:16 region of
	 * s3_0_c15_c0_2) and then polls the status field in
	 * bits 47:32 until the background work has stopped.
	 * Clobbers: x0, x1, x2, x3.
	 * ----------------------------------------------------
	 */
func denver_disable_dco

	/* Preserve the return address across the helper call */
	mov	x3, x30

	/* turn off background work */
	bl	plat_my_core_pos

	/* x1 = 1 << core_pos; disable bit lives 16 bits higher */
	mov	x1, #1
	lsl	x1, x1, x0
	lsl	x2, x1, #16
	msr	s3_0_c15_c0_2, x2
	isb

	/* wait till the background work turns off */
1:	mrs	x2, s3_0_c15_c0_2
	lsr	x2, x2, #32
	and	w2, w2, 0xFFFF
	and	x2, x2, x1
	cbnz	x2, 1b

	mov	x30, x3
	ret
endfunc denver_disable_dco
/*
 * Report whether the CVE-2017-5715 workaround applies on this CPU.
 * Returns ERRATA_APPLIES in x0 when the build enables the workaround
 * AND the hardware supports the flush instruction; otherwise
 * ERRATA_MISSING. Clobbers: x0, x1, x2.
 */
func check_errata_cve_2017_5715
	mov	x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x1, id_afr0_el1
	mov	x2, #0x10000
	and	x1, x1, x2
	/* bit 16 clear -> instruction not implemented */
	cbz	x1, 1f
	mov	x0, #ERRATA_APPLIES
1:
#endif
	ret
endfunc check_errata_cve_2017_5715

/*
 * Report whether the CVE-2018-3639 (SSBD) workaround applies.
 * Purely build-time: returns ERRATA_APPLIES in x0 when the workaround
 * is compiled in, ERRATA_MISSING otherwise.
 */
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

223
224
225
226
227
228
229
230
	/* -------------------------------------------------
	 * The CPU Ops reset function for Denver.
	 * -------------------------------------------------
	 */
func denver_reset_func

	mov	x19, x30

231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x0, id_afr0_el1
	mov	x1, #0x10000
	and	x0, x0, x1
	cmp	x0, #0
	adr	x1, workaround_bpflush_runtime_exceptions
	mrs	x2, vbar_el3
	csel	x0, x1, x2, ne
	msr	vbar_el3, x0
#endif

248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
#if WORKAROUND_CVE_2018_3639
	/*
	 * Denver CPUs with DENVER_MIDR_PN3 or earlier, use different
	 * bits in the ACTLR_EL3 register to disable speculative
	 * store buffer and memory disambiguation.
	 */
	mrs	x0, midr_el1
	mov_imm	x1, DENVER_MIDR_PN4
	cmp	x0, x1
	mrs	x0, actlr_el3
	mov	x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
	mov	x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
	csel	x3, x1, x2, ne
	orr	x0, x0, x3
	msr	actlr_el3, x0
	isb
	dsb	sy
#endif

267
268
269
270
271
272
273
274
275
	/* ----------------------------------------------------
	 * Reset ACTLR.PMSTATE to C1 state
	 * ----------------------------------------------------
	 */
	mrs	x0, actlr_el1
	bic	x0, x0, #DENVER_CPU_PMSTATE_MASK
	orr	x0, x0, #DENVER_CPU_PMSTATE_C1
	msr	actlr_el1, x0

276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
	bl	denver_enable_dco

	ret	x19
endfunc denver_reset_func

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Denver.
	 *
	 * Clobbers: x0 (via denver_disable_ext_debug), x19.
	 * ----------------------------------------------------
	 */
func denver_core_pwr_dwn

	/* Preserve the return address across the helper call */
	mov	x19, x30

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	bl	denver_disable_ext_debug

	ret	x19
endfunc denver_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Denver.
	 *
	 * Intentionally a no-op: no cluster-level action needed.
	 * -------------------------------------------------------
	 */
func denver_cluster_pwr_dwn
	ret
endfunc denver_cluster_pwr_dwn

#if REPORT_ERRATA
	/*
	 * Errata printing function for Denver. Must follow AAPCS.
	 * x8 (callee-saved per AAPCS64) carries the revision-variant
	 * value across the report_errata macro expansions.
	 */
func denver_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * checking functions of each errata.
	 */
	report_errata WORKAROUND_CVE_2017_5715, denver, cve_2017_5715
	report_errata WORKAROUND_CVE_2018_3639, denver, cve_2018_3639

	ldp	x8, x30, [sp], #16
	ret
endfunc denver_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides Denver specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.denver_regs, "aS"
denver_regs:  /* The ascii list of register names to be reported */
	/* trailing empty string terminates the name list */
	.asciz	"actlr_el1", ""

func denver_cpu_reg_dump
	adr	x6, denver_regs
	mrs	x8, ACTLR_EL1
	ret
endfunc denver_cpu_reg_dump

351
declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
352
	denver_reset_func, \
353
354
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
355
356
357
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

358
declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
359
	denver_reset_func, \
360
361
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
362
363
364
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

365
declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
366
	denver_reset_func, \
367
368
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
369
370
371
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

372
declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
373
	denver_reset_func, \
374
375
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
376
377
378
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

379
declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
380
	denver_reset_func, \
381
382
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
383
384
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn