/*
 * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu data pointer (TPIDR_EL3) for this CPU.
	 * This is done early so that crash reporting has access to the crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 *  both Security states and both Execution states.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 *  to EL3 when executing at any EL.
	 *
	 * SCR_EL3.{API,APK}: For Armv8.3 pointer authentication feature,
	 * disable traps to EL3 when accessing key registers or using pointer
	 * authentication instructions from lower ELs.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers are saved during world
	 * switches, enable pointer authentication everywhere, as it is safe to
	 * do so.
	 */
	orr	x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
#endif
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 *  privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 *  accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 *  debug registers, other than those registers that are controlled by
	 *  MDCR_EL3.TDOSA.
	 *
	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
	 *  accesses to all Performance Monitors registers do not trap to EL3.
	 *
	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
	 *  prohibited in Secure state. This bit is RES0 in versions of the
	 *  architecture earlier than ARMv8.5; setting it to 1 has no effect
	 *  on them.
	 *
	 * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
	 *  counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
	 *  Debug is not implemented this bit does not have any effect on the
	 *  counters unless there is support for the implementation defined
	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT) & \
		    ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | MDCR_TDA_BIT | \
		      MDCR_TPM_BIT))

	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise PMCR_EL0, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR_EL0.LP: Set to one so that, when ARMv8.5-PMU is implemented,
	 *  event counter overflow, which is recorded in PMOVSCLR_EL0[0-30],
	 *  occurs on the increment that changes PMEVCNTR<n>_EL0[63] from 1
	 *  to 0. This bit is RES0 in versions of the architecture earlier
	 *  than ARMv8.5; setting it to 1 has no effect on them.
	 *
	 * PMCR_EL0.LC: Set to one so that cycle counter overflow, which
	 *  is recorded in PMOVSCLR_EL0[31], occurs on the increment
	 *  that changes PMCCNTR_EL0[63] from 1 to 0.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter,
	 *  PMCCNTR_EL0, does not count when event counting is prohibited.
	 *
	 * PMCR_EL0.X: Set to zero to disable export of events.
	 *
	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
	 *  counts on every clock cycle.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))

	msr	pmcr_el0, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 *
	 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
	 *  trace registers do not trap to EL3.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 *  by Advanced SIMD, floating-point or SVE instructions (if implemented)
	 *  do not trap to EL3.
	 * ---------------------------------------------------------------------
	 */
	mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
	msr	cptr_el3, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
	bne	1f
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
	.endm
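
/*
 * Note: el3_arch_init_common is invoked from el3_entrypoint_common below,
 * after the exception vectors have been programmed and the CPU reset
 * handler has run, so the unmasking of External Aborts it performs is
 * safe at that point.
 */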

/* -----------------------------------------------------------------------------
 * This is the super set of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows enabling or
 * disabling some actions.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address: if it is not zero,
		 * this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_INV_DCACHE)
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -------------------------------------------------------------
		 */
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__
		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */
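
	/*
	 * Note: the symbols referenced above (__RW_START__, __RW_END__,
	 * __BSS_START__, __BSS_END__, __COHERENT_RAM_START__,
	 * __COHERENT_RAM_END_UNALIGNED__, __DATA_RAM_START__,
	 * __DATA_ROM_START__ and __DATA_RAM_END__) are expected to be
	 * provided by the image's linker script.
	 */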

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
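
/*
 * For illustration only: a minimal sketch of how a BL image entrypoint
 * might invoke the macro above. The parameter values are assumptions
 * chosen for the example rather than a definitive configuration; refer to
 * the actual BL1/BL31 entrypoint files for the values each image uses.
 *
 *	func bl31_entrypoint
 *		el3_entrypoint_common					\
 *			_init_sctlr=1					\
 *			_warm_boot_mailbox=1				\
 *			_secondary_cold_boot=1				\
 *			_init_memory=1					\
 *			_init_c_runtime=1				\
 *			_exception_vectors=runtime_exceptions
 *		...
 *	endfunc bl31_entrypoint
 */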

#endif /* EL3_COMMON_MACROS_S */