/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>

#if !DYNAMIC_WORKAROUND_CVE_2018_3639
#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
#endif

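/*
 * ESR_EL3 values corresponding to an SMC #0 trapped from AArch64 (EC = 0x17)
 * and from AArch32 (EC = 0x13), with IL set and a zero ISS.
 */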
#define ESR_EL3_A64_SMC0	0x5e000000
#define ESR_EL3_A32_SMC0	0x4e000000

	/*
	 * This macro applies the mitigation for CVE-2018-3639.
	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
	 * SMC calls from a lower EL running in AArch32 or AArch64
	 * will go through the fast path and return early.
	 *
	 * The macro saves x2-x3 to the context.  In the fast path
	 * x0-x3 registers do not need to be restored as the calling
	 * context will have saved them.
	 */
	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

	.if \_is_sync_exception
		/*
		 * Ensure the SMC came from AArch64/AArch32 with immediate #0
		 * and with W0 = SMCCC_ARCH_WORKAROUND_2
		 *
		 * This sequence evaluates as:
		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
		 * allowing use of a single branch operation
		 */
		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
		cmp	x0, x2
		mrs	x3, esr_el3
		mov_imm	w2, \_esr_el3_val
		ccmp	w2, w3, #0, eq
		/*
		 * Static predictor will predict a fall-through, optimizing
		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
		 */
		bne	1f

		/*
		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
		 * fast path.
		 */
		cmp	x1, xzr /* enable/disable check */

		/*
		 * When the calling context wants mitigation disabled,
		 * we program the mitigation disable function in the
		 * CPU context, which gets invoked on subsequent exits from
		 * EL3 via the `el3_exit` function.  Otherwise NULL is
		 * programmed in the CPU context, which results in the caller
		 * inheriting the EL3 mitigation state (enabled) on subsequent
		 * `el3_exit`.
		 */
		mov	x0, xzr
		adr	x1, cortex_a76_disable_wa_cve_2018_3639
		csel	x1, x1, x0, eq
		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]

		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		csel	x3, x3, x1, eq
		msr	CORTEX_A76_CPUACTLR2_EL1, x3
		eret	/* ERET implies ISB */
	.endif
1:
	/*
	 * Always enable the CVE-2018-3639 (variant 4) mitigation during EL3
	 * execution.  This is not required for the fast path above because it
	 * does not perform any memory loads.
	 */
	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x2
	isb

	/*
	 * The caller may have passed arguments to EL3 via x2-x3.
	 * Restore these registers from the context before jumping to the
	 * main runtime vector table entry.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.endm
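
	/*
	 * For reference, a minimal sketch of the caller side of this fast
	 * path, assuming the SMCCC_ARCH_WORKAROUND_2 function ID from
	 * <arm_arch_svc.h> and a caller running at a lower EL in AArch64:
	 *
	 *	mov_imm	w0, SMCCC_ARCH_WORKAROUND_2	// function ID in W0
	 *	mov	w1, #1				// 1 = enable, 0 = disable the mitigation
	 *	smc	#0				// fast path returns via the ERET above
	 *
	 * The calling sequence itself is defined by the SMC Calling
	 * Convention; the sketch only illustrates the call shape matched by
	 * the checks in the macro.
	 */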

vector_base cortex_a76_wa_cve_2018_3639_a76_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry cortex_a76_sync_exception_sp_el0

vector_entry cortex_a76_irq_sp_el0
	b	irq_sp_el0
end_vector_entry cortex_a76_irq_sp_el0

vector_entry cortex_a76_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry cortex_a76_fiq_sp_el0

vector_entry cortex_a76_serror_sp_el0
	b	serror_sp_el0
end_vector_entry cortex_a76_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry cortex_a76_sync_exception_sp_elx

vector_entry cortex_a76_irq_sp_elx
	b	irq_sp_elx
end_vector_entry cortex_a76_irq_sp_elx

vector_entry cortex_a76_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry cortex_a76_fiq_sp_elx

vector_entry cortex_a76_serror_sp_elx
	b	serror_sp_elx
end_vector_entry cortex_a76_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
	b	sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64

vector_entry cortex_a76_irq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	irq_aarch64
end_vector_entry cortex_a76_irq_aarch64

vector_entry cortex_a76_fiq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64

vector_entry cortex_a76_serror_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	serror_aarch64
end_vector_entry cortex_a76_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
	b	sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32

vector_entry cortex_a76_irq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	irq_aarch32
end_vector_entry cortex_a76_irq_aarch32

vector_entry cortex_a76_fiq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32

vector_entry cortex_a76_serror_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	serror_aarch32
end_vector_entry cortex_a76_serror_aarch32

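/*
 * Report whether the CVE-2018-3639 workaround is compiled in; used by the
 * errata reporting code below.
 */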
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

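/*
 * Disable the CVE-2018-3639 mitigation by clearing the load-pass-store
 * disable bit in CPUACTLR2_EL1.  This is the hook programmed into the CPU
 * context by the fast path above and registered with the CPU ops at the
 * bottom of this file.
 */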
func cortex_a76_disable_wa_cve_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
	ret
endfunc cortex_a76_disable_wa_cve_2018_3639

func cortex_a76_reset_func
	mov	x19, x30

#if WORKAROUND_CVE_2018_3639
	/* If the PE implements SSBS, we don't need the dynamic workaround */
	mrs	x0, id_aa64pfr1_el1
	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
	and	x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
	cbnz	x0, 1f

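	/*
	 * SSBS is not implemented; default to the mitigated state by setting
	 * the CPUACTLR2_EL1 bit that disables load-pass-store speculation.
	 */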
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb

#ifdef IMAGE_BL31
	/*
	 * The Cortex-A76 generic vectors are overwritten to use the vectors
	 * defined above.  This is required in order to apply mitigation
	 * against CVE-2018-3639 on exception entry from lower ELs.
	 */
	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
	msr	vbar_el3, x0
	isb
#endif

1:
#endif

#if ERRATA_DSU_936184
	bl	errata_dsu_936184_wa
#endif
	ret	x19
endfunc cortex_a76_reset_func

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a76_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
	isb
	ret
endfunc cortex_a76_core_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Cortex-A76. Must follow AAPCS.
 */
func cortex_a76_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * the checking function of each erratum.
	 */
	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184

	ldp	x8, x30, [sp], #16
	ret
endfunc cortex_a76_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides cortex_a76 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_a76_cpu_reg_dump
	adr	x6, cortex_a76_regs
	mrs	x8, CORTEX_A76_CPUECTLR_EL1
	ret
endfunc cortex_a76_cpu_reg_dump

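/*
 * Register the Cortex-A76 CPU operations: the MIDR used for matching, the
 * reset handler, no extra1 hook (CPU_NO_EXTRA1_FUNC), the CVE-2018-3639
 * disable hook used by the dynamic workaround, and the core power-down
 * handler.
 */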
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
	cortex_a76_reset_func, \
	CPU_NO_EXTRA1_FUNC, \
	cortex_a76_disable_wa_cve_2018_3639, \
	cortex_a76_core_pwr_dwn