/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 *  or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 *  Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable asynchronous data aborts now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The cp10
	 *  field is set to allow access to Advanced SIMD and floating point
	 *  features from both Security states.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 *  to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The cp10
	 *  field is set to allow full access from PL0 and PL1 to floating-point
	 *  and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 *  from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by a soft-float toolchain.
	 *  ARMv7 and Cortex-A32 (ARMv8/AArch32) toolchains come in both
	 *  soft-float and hard-float variants; avoid compiling the code below
	 *  with a soft-float toolchain, as the "vmsr" instruction would not be
	 *  recognised.
	 * ---------------------------------------------------------------------
	 */
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 *  Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 *  in Secure state. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 doesn't have any effect on
	 *  them.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | SDCR_SCCD_BIT)
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that event counter overflow, that
	 *  is recorded in PMOVSCLR[0-30], occurs on the increment
	 *  that changes PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU
	 *  is implemented. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 doesn't have any effect
	 *  on them.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that cycle counter overflow, that
	 *  is recorded in PMOVSCLR[31], occurs on the increment
	 *  that changes PMCCNTR[63] from 1 to 0.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm
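
	/*
	 * Illustrative usage (sketch only): this macro takes no arguments and
	 * is expected to be invoked once on the cold boot path, after the CPU
	 * reset handler has run, as el3_entrypoint_common does further below:
	 *
	 *	bl	reset_handler
	 *	el3_arch_init_common
	 */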

/* -----------------------------------------------------------------------------
 * This is the super set of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter enables or disables
 * some actions.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register including
 *	configuring the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 *  Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
				SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero
		 * then it is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -----------------------------------------------------------------
		 */
		ldr	r0, =__RW_START__
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif

		/*
		 * zeromem uses r12, which holds arg3 from the previous BL
		 * stage, so save it in r7
		 */
		mov	r7, r12
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_SIZE__
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_SIZE__
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
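
	/*
	 * Illustrative invocation (sketch only): an image entrypoint built on
	 * this macro would typically supply the parameters by name. The values
	 * and the vector table symbol below (my_exception_vectors) are
	 * placeholders; the actual choices depend on the image (e.g. BL1 or
	 * SP_MIN):
	 *
	 *	el3_entrypoint_common					\
	 *		_init_sctlr=1					\
	 *		_warm_boot_mailbox=1				\
	 *		_secondary_cold_boot=1				\
	 *		_init_memory=1					\
	 *		_init_c_runtime=1				\
	 *		_exception_vectors=my_exception_vectors
	 */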

#endif /* EL3_COMMON_MACROS_S */