/*
 * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
9
#include <common/bl_common.h>
10
#include <context.h>
11
12
13
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
14
#include <services/arm_arch_svc.h>
15

16
17
18
19
20
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "Cortex-A76 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif

21
22
23
#define ESR_EL3_A64_SMC0	0x5e000000
#define ESR_EL3_A32_SMC0	0x4e000000

24
#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/*
	 * This macro applies the mitigation for CVE-2018-3639.
	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
	 * SMC calls from a lower EL running in AArch32 or AArch64
	 * will go through the fast and return early.
	 *
	 * The macro saves x2-x3 to the context. In the fast path
	 * x0-x3 registers do not need to be restored as the calling
	 * context will have saved them.
	 */
	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

	.if \_is_sync_exception
		/*
		 * Ensure SMC is coming from A64/A32 state on #0
		 * with W0 = SMCCC_ARCH_WORKAROUND_2
		 *
		 * This sequence evaluates as:
		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
		 * allowing use of a single branch operation
		 */
		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
		cmp	x0, x2
		mrs	x3, esr_el3
		mov_imm	w2, \_esr_el3_val
		ccmp	w2, w3, #0, eq
		/*
		 * Static predictor will predict a fall-through, optimizing
		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
		 */
		bne	1f

		/*
		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
		 * fast path.
		 */
		cmp	x1, xzr /* enable/disable check */

		/*
		 * When the calling context wants mitigation disabled,
		 * we program the mitigation disable function in the
		 * CPU context, which gets invoked on subsequent exits from
		 * EL3 via the `el3_exit` function. Otherwise NULL is
		 * programmed in the CPU context, which results in caller's
		 * inheriting the EL3 mitigation state (enabled) on subsequent
		 * `el3_exit`.
		 */
		mov	x0, xzr
		adr	x1, cortex_a76_disable_wa_cve_2018_3639
		csel	x1, x1, x0, eq
		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]

		/* Program the mitigation state itself: set the
		 * DISABLE_LOAD_PASS_STORE chicken bit to enable the
		 * mitigation, clear it to disable. */
		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		csel	x3, x3, x1, eq
		msr	CORTEX_A76_CPUACTLR2_EL1, x3
		eret	/* ERET implies ISB */
	.endif
1:
	/*
	 * Always enable v4 mitigation during EL3 execution. This is not
	 * required for the fast path above because it does not perform any
	 * memory loads.
	 */
	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x2
	isb

	/*
	 * The caller may have passed arguments to EL3 via x2-x3.
	 * Restore these registers from the context before jumping to the
	 * main runtime vector table entry.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.endm

/*
 * CPU-specific EL3 vector table that front-ends the generic runtime vectors
 * with the CVE-2018-3639 mitigation macro on entries from lower ELs.
 */
vector_base cortex_a76_wa_cve_2018_3639_a76_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry cortex_a76_sync_exception_sp_el0

vector_entry cortex_a76_irq_sp_el0
	b	irq_sp_el0
end_vector_entry cortex_a76_irq_sp_el0

vector_entry cortex_a76_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry cortex_a76_fiq_sp_el0

vector_entry cortex_a76_serror_sp_el0
	b	serror_sp_el0
end_vector_entry cortex_a76_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry cortex_a76_sync_exception_sp_elx

vector_entry cortex_a76_irq_sp_elx
	b	irq_sp_elx
end_vector_entry cortex_a76_irq_sp_elx

vector_entry cortex_a76_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry cortex_a76_fiq_sp_elx

vector_entry cortex_a76_serror_sp_elx
	b	serror_sp_elx
end_vector_entry cortex_a76_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
	b	sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64

vector_entry cortex_a76_irq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	irq_aarch64
end_vector_entry cortex_a76_irq_aarch64

vector_entry cortex_a76_fiq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64

vector_entry cortex_a76_serror_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	serror_aarch64
end_vector_entry cortex_a76_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
	b	sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32

vector_entry cortex_a76_irq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	irq_aarch32
end_vector_entry cortex_a76_irq_aarch32

vector_entry cortex_a76_fiq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32

vector_entry cortex_a76_serror_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	serror_aarch32
end_vector_entry cortex_a76_serror_aarch32
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
194

195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1073348.
	 * This applies only to revision <= r1p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1073348_wa
	/*
	 * Compare x0 against revision r1p0
	 */
	mov	x17, x30
	bl	check_errata_1073348
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR_EL1
	orr	x1, x1 ,#CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
	msr	CORTEX_A76_CPUACTLR_EL1, x1
	isb
1:
	ret	x17
	endfunc errata_a76_1073348_wa

func check_errata_1073348
	mov	x1, #0x10
	b	cpu_rev_var_ls
endfunc check_errata_1073348

223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1130799.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1130799_wa
	/*
	 * Compare x0 against revision r2p0
	 */
	mov	x17, x30
	bl	check_errata_1130799
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR2_EL1
	orr	x1, x1 ,#(1 << 59)
	msr	CORTEX_A76_CPUACTLR2_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1130799_wa

func check_errata_1130799
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1130799

251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1220197.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1220197_wa
/*
 * Compare x0 against revision r2p0
 */
	mov	x17, x30
	bl	check_errata_1220197
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUECTLR_EL1
	orr	x1, x1, #CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
	msr	CORTEX_A76_CPUECTLR_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1220197_wa

func check_errata_1220197
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1220197

279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

func cortex_a76_disable_wa_cve_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
	ret
endfunc cortex_a76_disable_wa_cve_2018_3639

296
297
298
299
300
	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A76.
	 * Shall clobber: x0-x19
	 * -------------------------------------------------
	 */
301
func cortex_a76_reset_func
302
	mov	x19, x30
303
	bl	cpu_get_rev_var
304
	mov	x18, x0
305

306
307
308
309
310
#if ERRATA_A76_1073348
	mov	x0, x18
	bl	errata_a76_1073348_wa
#endif

311
#if ERRATA_A76_1130799
312
	mov	x0, x18
313
314
	bl	errata_a76_1130799_wa
#endif
315

316
317
318
319
320
#if ERRATA_A76_1220197
	mov	x0, x18
	bl	errata_a76_1220197_wa
#endif

321
#if WORKAROUND_CVE_2018_3639
322
323
324
	/* If the PE implements SSBS, we don't need the dynamic workaround */
	mrs	x0, id_aa64pfr1_el1
	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
Ambroise Vincent's avatar
Ambroise Vincent committed
325
	and	x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
326
327
328
329
330
#if !DYNAMIC_WORKAROUND_CVE_2018_3639 && ENABLE_ASSERTIONS
	cmp	x0, 0
	ASM_ASSERT(ne)
#endif
#if DYNAMIC_WORKAROUND_CVE_2018_3639
331
	cbnz	x0, 1f
332
333
334
335
336
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb

337
#ifdef IMAGE_BL31
338
339
	/*
	 * The Cortex-A76 generic vectors are overwritten to use the vectors
Ambroise Vincent's avatar
Ambroise Vincent committed
340
	 * defined above. This is required in order to apply mitigation
341
342
343
344
345
	 * against CVE-2018-3639 on exception entry from lower ELs.
	 */
	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
	msr	vbar_el3, x0
	isb
346
#endif /* IMAGE_BL31 */
347

348
1:
349
350
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
#endif /* WORKAROUND_CVE_2018_3639 */
351

352
353
354
355
#if ERRATA_DSU_798953
	bl	errata_dsu_798953_wa
#endif

356
357
358
#if ERRATA_DSU_936184
	bl	errata_dsu_936184_wa
#endif
359

360
	ret	x19
361
362
endfunc cortex_a76_reset_func

363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a76_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
	isb
	ret
endfunc cortex_a76_core_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Cortex A76. Must follow AAPCS.
 */
func cortex_a76_errata_report
	stp	x8, x30, [sp, #-16]!	/* x8 is callee-saved for us; keep LR */

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * checking functions of each errata.
	 */
	report_errata ERRATA_A76_1073348, cortex_a76, 1073348
	report_errata ERRATA_A76_1130799, cortex_a76, 1130799
	report_errata ERRATA_A76_1220197, cortex_a76, 1220197
	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
	report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184

	ldp	x8, x30, [sp], #16
	ret
endfunc cortex_a76_errata_report
#endif

405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
	/* ---------------------------------------------
	 * This function provides cortex_a76 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_a76_cpu_reg_dump
	adr	x6, cortex_a76_regs
	mrs	x8, CORTEX_A76_CPUECTLR_EL1
	ret
endfunc cortex_a76_cpu_reg_dump

424
425
426
427
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
	cortex_a76_reset_func, \
	CPU_NO_EXTRA1_FUNC, \
	cortex_a76_disable_wa_cve_2018_3639, \
428
	cortex_a76_core_pwr_dwn