/*
 * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu cache pointer to the CPU.
	 * This is done early to enable crash reporting to have access to crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 *  EL2, EL1 and EL0 are not trapped to EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 *  both Security states and both Execution states.
	 *
	 * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
	 *  to EL3 when executing at any EL.
	 *
	 * SCR_EL3.{API,APK}: For Armv8.3 pointer authentication feature,
	 * disable traps to EL3 when accessing key registers or using pointer
	 * authentication instructions from lower ELs.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT) \
			& ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers are saved during world
	 * switches, enable pointer authentication everywhere, as it is safe to
	 * do so.
	 */
	orr	x0, x0, #(SCR_API_BIT | SCR_APK_BIT)
#endif
#if ENABLE_RME
	/*
	 * The current RME-enabled model does not run in R-EL2 unless EEL2 is
	 * set, and EL3 cannot access ICC_SRE_EL2 without it.
	 */
	orr	x0, x0, #SCR_EEL2_BIT
#endif
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 *  privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 *  accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 *  debug registers, other than those registers that are controlled by
	 *  MDCR_EL3.TDOSA.
	 *
	 * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
	 *  accesses to all Performance Monitors registers do not trap to EL3.
	 *
	 * MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
	 *  prohibited in Secure state. This bit is RES0 in versions of the
	 *  architecture earlier than ARMv8.5; setting it to 1 there has no
	 *  effect.
	 *
	 * MDCR_EL3.SPME: Set to zero so that event counting by the programmable
	 *  counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
	 *  Debug is not implemented, this bit does not have any effect on the
	 *  counters unless there is support for the implementation defined
	 *  authentication interface ExternalSecureNoninvasiveDebugEnabled().
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
		      MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT) & \
		    ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | MDCR_TDA_BIT | \
		      MDCR_TPM_BIT))

	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise PMCR_EL0 setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR_EL0.LP: Set to one so that event counter overflow, which
	 *  is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
	 *  that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
	 *  is implemented. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 there has no effect.
	 *
	 * PMCR_EL0.LC: Set to one so that cycle counter overflow, which
	 *  is recorded in PMOVSCLR_EL0[31], occurs on the increment
	 *  that changes PMCCNTR_EL0[63] from 1 to 0.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter, PMCCNTR_EL0,
	 *  does not count when event counting is prohibited.
	 *
	 * PMCR_EL0.X: Set to zero to disable export of events.
	 *
	 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
	 *  counts on every clock cycle.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
		      PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
		    ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))

	msr	pmcr_el0, x0

	/* ---------------------------------------------------------------------
	 * Enable External Aborts and SError Interrupts now that the exception
	 * vectors have been set up.
	 * ---------------------------------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 *
	 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
	 *  trace registers do not trap to EL3.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 *  by Advanced SIMD, floating-point or SVE instructions (if implemented)
	 *  do not trap to EL3.
	 */
	mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
	msr	cptr_el3, x0

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
	cmp	x0, #ID_AA64PFR0_DIT_SUPPORTED
	bne	1f
	mov	x0, #DIT_BIT
	msr	DIT, x0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the super set of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 *  _pie_fixup_size:
 *	Size of the memory region within which to fix up the Global
 *	Descriptor Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

#if DISABLE_MTPMU
		bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address and if it is not zero
		 * then it means it is a warm boot so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label within the first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

#if !(defined(IMAGE_BL2) && ENABLE_RME)
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler
#endif

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allow entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((BL2_AT_EL3 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -------------------------------------------------------------
		 */
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__
		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
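		/* -------------------------------------------------------------
		 * Copy the data section from ROM to RAM.
		 * -------------------------------------------------------------
		 */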
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
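
/* -----------------------------------------------------------------------------
 * Illustrative invocation of el3_entrypoint_common (comment only, not
 * assembled). The parameter values below are assumptions: they depend on the
 * image and on platform build options, and runtime_exceptions stands for
 * whichever exception vector table the EL3 image provides:
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=runtime_exceptions		\
 *		_pie_fixup_size=BL31_LIMIT - BL31_BASE
 * -----------------------------------------------------------------------------
 */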

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching; also save x29, which is used in the called
	 * function.
	 */
	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed so that the page table walk is not enabled until
	 * all EL1 system registers have been restored. TCR_EL1 must
	 * be updated last, as it restores the previous stage 1 page
	 * table walk setting, i.e. the TCR_EL1.EPDx bits. The ISBs
	 * ensure that the CPU performs the steps below in order.
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1 using ISB.
	 * 2. Restore SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 written successfully using ISB.
	 * 4. Restore TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm
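
/* -----------------------------------------------------------------------------
 * Usage sketch for the two macros above (comment only, not assembled). The
 * surrounding save/restore steps are assumptions about the EL3 entry/exit
 * path: apply_at_speculative_wa is meant to run on entry to EL3 once the GP
 * registers have been saved to the context, and restore_ptw_el1_sys_regs on
 * the exit path before the final ERET:
 *
 *	(save GP registers to the context)
 *	apply_at_speculative_wa
 *	...	handle the exception at EL3	...
 *	restore_ptw_el1_sys_regs
 *	(restore remaining context)
 *	eret
 * -----------------------------------------------------------------------------
 */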

#endif /* EL3_COMMON_MACROS_S */