tegra_helpers.S
/*
 * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <cortex_a53.h>
#include <cortex_a57.h>
#include <platform_def.h>
#include <tegra_def.h>
#include <tegra_platform.h>

#define MIDR_PN_CORTEX_A57		0xD07

/*******************************************************************************
 * Implementation defined ACTLR_EL3 bit definitions
 ******************************************************************************/
#define ACTLR_ELx_L2ACTLR_BIT		(U(1) << 6)
#define ACTLR_ELx_L2ECTLR_BIT		(U(1) << 5)
#define ACTLR_ELx_L2CTLR_BIT		(U(1) << 4)
#define ACTLR_ELx_CPUECTLR_BIT		(U(1) << 1)
#define ACTLR_ELx_CPUACTLR_BIT		(U(1) << 0)
#define ACTLR_ELx_ENABLE_ALL_ACCESS	(ACTLR_ELx_L2ACTLR_BIT | \
					 ACTLR_ELx_L2ECTLR_BIT | \
					 ACTLR_ELx_L2CTLR_BIT | \
					 ACTLR_ELx_CPUECTLR_BIT | \
					 ACTLR_ELx_CPUACTLR_BIT)
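/*
 * Note: these bits gate lower-EL (non-secure) access to the Cortex-A57/A53
 * implementation defined L2ACTLR, L2ECTLR, L2CTLR, CPUECTLR and CPUACTLR
 * registers; cpu_init_common below sets them in both ACTLR_EL3 and ACTLR_EL2.
 */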

	/* Global functions */
	.globl	plat_is_my_cpu_primary
	.globl	plat_my_core_pos
	.globl	plat_get_my_entrypoint
	.globl	plat_secondary_cold_boot_setup
	.globl	platform_mem_init
	.globl	plat_crash_console_init
	.globl	plat_crash_console_putc
	.globl	plat_crash_console_flush
	.weak	plat_core_pos_by_mpidr
	.globl	tegra_secure_entrypoint
	.globl	plat_reset_handler

	/* Global variables */
	.globl	tegra_sec_entry_point
	.globl	ns_image_entrypoint
	.globl	tegra_bl31_phys_base
	.globl	tegra_console_base

	/* ---------------------
	 * Common CPU init code
	 * ---------------------
	 */
.macro	cpu_init_common

	/* ------------------------------------------------
	 * We enable processor retention, L2/CPUECTLR NS
	 * access and ECC/Parity protection for A57 CPUs
	 * ------------------------------------------------
	 */
	mrs	x0, midr_el1
	mov	x1, #(MIDR_PN_MASK << MIDR_PN_SHIFT)
	and	x0, x0, x1
	lsr	x0, x0, #MIDR_PN_SHIFT
	cmp	x0, #MIDR_PN_CORTEX_A57
	b.ne	1f

	/* ---------------------------
	 * Enable processor retention
	 * ---------------------------
	 */
	mrs	x0, CORTEX_A57_L2ECTLR_EL1
	mov	x1, #RETENTION_ENTRY_TICKS_512
	bic	x0, x0, #CORTEX_A57_L2ECTLR_RET_CTRL_MASK
	orr	x0, x0, x1
	msr	CORTEX_A57_L2ECTLR_EL1, x0
	isb

	mrs	x0, CORTEX_A57_ECTLR_EL1
	mov	x1, #RETENTION_ENTRY_TICKS_512
	bic	x0, x0, #CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK
	orr	x0, x0, x1
	msr	CORTEX_A57_ECTLR_EL1, x0
	isb

	/* -------------------------------------------------------
	 * Enable L2 and CPU ECTLR RW access from non-secure world
	 * -------------------------------------------------------
	 */
	mrs	x0, actlr_el3
	mov	x1, #ACTLR_ELx_ENABLE_ALL_ACCESS
	orr	x0, x0, x1
	msr	actlr_el3, x0
	mrs	x0, actlr_el2
	mov	x1, #ACTLR_ELx_ENABLE_ALL_ACCESS
	orr	x0, x0, x1
	msr	actlr_el2, x0
	isb

	/* --------------------------------
	 * Enable the cycle count register
	 * --------------------------------
	 */
1:	mrs	x0, pmcr_el0
	ubfx	x0, x0, #11, #5		// read PMCR.N field
	mov	x1, #1
	lsl	x0, x1, x0
	sub	x0, x0, #1		// mask of event counters
	orr	x0, x0, #0x80000000	// disable overflow intrs
	msr	pmintenclr_el1, x0
	msr	pmuserenr_el0, x1	// enable user mode access
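	/*
	 * Example: if PMCR_EL0.N reads 6, the mask built above is
	 * (1 << 6) - 1 = 0x3f, i.e. all six event counters; bit 31 of
	 * pmintenclr_el1 additionally clears the cycle counter overflow
	 * interrupt, and PMUSERENR_EL0.EN (x1 = 1) opens EL0 access to
	 * the PMU.
	 */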

	/* ----------------------------------------------------------------
	 * Allow non-privileged access to CNTVCT: Set CNTKCTL (Kernel Count
	 * register), bit 1 (EL0VCTEN) to enable access to CNTVCT/CNTFRQ
	 * registers from EL0.
	 * ----------------------------------------------------------------
	 */
	mrs	x0, cntkctl_el1
	orr	x0, x0, #EL0VCTEN_BIT
	msr	cntkctl_el1, x0
.endm

	/* -----------------------------------------------------
	 * unsigned int plat_is_my_cpu_primary(void);
	 *
	 * This function checks if this is the Primary CPU
	 * -----------------------------------------------------
	 */
func plat_is_my_cpu_primary
	mrs	x0, mpidr_el1
	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
	cmp	x0, #TEGRA_PRIMARY_CPU
	cset	x0, eq
	ret
endfunc plat_is_my_cpu_primary

	/* ----------------------------------------------------------
	 * unsigned int plat_my_core_pos(void);
	 *
	 * result: CorePos = CoreId + (ClusterId * cpus per cluster)
	 * Registers clobbered: x0 - x3, x8
	 * ----------------------------------------------------------
	 */
func plat_my_core_pos
	mov	x8, x30
	mrs	x0, mpidr_el1
	bl	plat_core_pos_by_mpidr
	ret	x8
endfunc plat_my_core_pos

	/* -----------------------------------------------------
	 * unsigned long plat_get_my_entrypoint (void);
	 *
	 * Main job of this routine is to distinguish between
	 * a cold and warm boot. If the tegra_sec_entry_point for
	 * this CPU is present, then it's a warm boot.
	 *
	 * -----------------------------------------------------
	 */
func plat_get_my_entrypoint
	adr	x1, tegra_sec_entry_point
	ldr	x0, [x1]
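	/*
	 * tegra_sec_entry_point is statically initialised to zero in the
	 * .data section below, so a zero return value indicates a cold
	 * boot to the caller.
	 */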
	ret
endfunc plat_get_my_entrypoint

	/* -----------------------------------------------------
	 * int platform_get_core_pos(int mpidr);
	 *
	 * result: CorePos = (ClusterId * cpus per cluster) +
	 *                   CoreId
	 * -----------------------------------------------------
	 */
func platform_get_core_pos
	and	x1, x0, #MPIDR_CPU_MASK
	and	x0, x0, #MPIDR_CLUSTER_MASK
	lsr	x0, x0, #MPIDR_AFFINITY_BITS
	mov	x2, #PLATFORM_MAX_CPUS_PER_CLUSTER
	mul	x0, x0, x2
	add	x0, x1, x0
	ret
endfunc platform_get_core_pos

	/* -----------------------------------------------------
	 * void plat_secondary_cold_boot_setup (void);
	 *
	 * This function performs any platform specific actions
	 * needed for a secondary cpu after a cold reset. Right
	 * now this is a stub function.
	 * -----------------------------------------------------
	 */
func plat_secondary_cold_boot_setup
	mov	x0, #0
	ret
endfunc plat_secondary_cold_boot_setup

	/* --------------------------------------------------------
	 * void platform_mem_init (void);
	 *
	 * Any memory init, relocation to be done before the
	 * platform boots. Called very early in the boot process.
	 * --------------------------------------------------------
	 */
func platform_mem_init
	mov	x0, #0
	ret
endfunc platform_mem_init

	/* ---------------------------------------------------
	 * Function to handle a platform reset and store
	 * input parameters passed by BL2.
	 * ---------------------------------------------------
	 */
func plat_reset_handler

	/* ----------------------------------------------------
	 * Verify if we are running from BL31_BASE address
	 * ----------------------------------------------------
	 */
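	/*
	 * adr yields the run-time (PC-relative) address of bl31_entrypoint,
	 * so comparing it against the linked BL31_BASE tells us whether the
	 * image is executing from somewhere else and needs to be relocated.
	 */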
	adr	x18, bl31_entrypoint
	mov	x17, #BL31_BASE
	cmp	x18, x17
	b.eq	1f

	/* ----------------------------------------------------
	 * Copy the entire BL31 code to BL31_BASE if we are not
	 * running from it already
	 * ----------------------------------------------------
	 */
	mov	x0, x17
	mov	x1, x18
	mov	x2, #BL31_SIZE
_loop16:
	cmp	x2, #16
	b.lo	_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	_loop16
	/* copy byte per byte */
_loop1:
	cbz	x2, _end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	_loop1

	/* ----------------------------------------------------
	 * Jump to BL31_BASE and start execution again
	 * ----------------------------------------------------
	 */
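	/*
	 * x20/x21 are expected to still hold the boot arguments handed over
	 * by BL2 (bl31_entrypoint stashes x0/x1 there before this handler
	 * runs); restore them before re-entering the relocated image.
	 */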
_end:	mov	x0, x20
	mov	x1, x21
	br	x17
1:

	/* -----------------------------------
	 * derive and save the phys_base addr
	 * -----------------------------------
	 */
	adr	x17, tegra_bl31_phys_base
	ldr	x18, [x17]
	cbnz	x18, 1f
	adr	x18, bl31_entrypoint
	str	x18, [x17]
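	/*
	 * The store above only happens while tegra_bl31_phys_base is still
	 * zero, so the address recorded on the first pass is kept for all
	 * later CPUs and warm boots.
	 */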

1:	cpu_init_common

	ret
endfunc plat_reset_handler

	/* ------------------------------------------------------
	 * int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
	 *
	 * This function implements a part of the critical
	 * interface between the psci generic layer and the
	 * platform that allows the former to query the platform
	 * to convert an MPIDR to a unique linear index. An error
	 * code (-1) is returned in case the MPIDR is invalid.
	 *
	 * Clobbers: x0-x3
	 * ------------------------------------------------------
	 */
func plat_core_pos_by_mpidr
	lsr	x1, x0, #MPIDR_AFF0_SHIFT
	and	x1, x1, #MPIDR_AFFLVL_MASK /* core id */
	lsr	x2, x0, #MPIDR_AFF1_SHIFT
	and	x2, x2, #MPIDR_AFFLVL_MASK /* cluster id */

	/* core_id >= PLATFORM_MAX_CPUS_PER_CLUSTER */
	mov	x0, #-1
	cmp	x1, #(PLATFORM_MAX_CPUS_PER_CLUSTER - 1)
	b.gt	1f

	/* cluster_id >= PLATFORM_CLUSTER_COUNT */
	cmp	x2, #(PLATFORM_CLUSTER_COUNT - 1)
	b.gt	1f

	/* CorePos = CoreId + (ClusterId * cpus per cluster) */
	mov	x3, #PLATFORM_MAX_CPUS_PER_CLUSTER
	mul	x3, x3, x2
	add	x0, x1, x3
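	/*
	 * Worked example (PLATFORM_MAX_CPUS_PER_CLUSTER = 4 assumed purely
	 * for illustration): an MPIDR with Aff1 = 1 and Aff0 = 2 maps to
	 * linear index 2 + (1 * 4) = 6.
	 */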

1:
	ret
endfunc plat_core_pos_by_mpidr

	/* ----------------------------------------
	 * Secure entrypoint function for CPU boot
	 * ----------------------------------------
	 */
func tegra_secure_entrypoint _align=6

#if ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT

	/* --------------------------------------------------------
	 * Skip the invalidate BTB workaround for Tegra210B01 SKUs.
	 * --------------------------------------------------------
	 */
	mov	x0, #TEGRA_MISC_BASE
	add	x0, x0, #HARDWARE_REVISION_OFFSET
	ldr	w1, [x0]
	lsr	w1, w1, #CHIP_ID_SHIFT
	and	w1, w1, #CHIP_ID_MASK
	cmp	w1, #TEGRA_CHIPID_TEGRA21	/* T210? */
	b.ne	2f
	ldr	w1, [x0]
	lsr	w1, w1, #MAJOR_VERSION_SHIFT
	and	w1, w1, #MAJOR_VERSION_MASK
	cmp	w1, #0x02			/* T210 B01? */
	b.eq	2f

	/* -------------------------------------------------------
	 * Invalidate BTB along with I$ to remove any stale
	 * entries from the branch predictor array.
	 * -------------------------------------------------------
	 */
	mrs	x0, CORTEX_A57_CPUACTLR_EL1
	orr	x0, x0, #1
	msr	CORTEX_A57_CPUACTLR_EL1, x0	/* invalidate BTB and I$ together */
	dsb	sy
	isb
	ic	iallu			/* actual invalidate */
	dsb	sy
	isb

	mrs	x0, CORTEX_A57_CPUACTLR_EL1
	bic	x0, x0, #1
	msr	CORTEX_A57_CPUACTLR_EL1, x0	/* restore original CPUACTLR_EL1 */
	dsb	sy
	isb

	.rept	7
	nop				/* wait */
	.endr

	/* -----------------------------------------------
	 * Extract OSLK bit and check if it is '1'. This
	 * bit remains '0' for A53 on warm-resets. If '1',
	 * turn off regional clock gating and request warm
	 * reset.
	 * -----------------------------------------------
	 */
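	/*
	 * The sequence below isolates OSLK into bit 1 of x0, while
	 * "x1, lsr #7" moves MPIDR.Aff1[0] into bit 1, so the BICS result
	 * is zero either when OSLK is already clear (warm reset) or when
	 * this core sits in cluster 1 (the A53 "slow" cluster on Tegra210).
	 */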
	mrs	x0, oslsr_el1
	and	x0, x0, #2
	mrs	x1, mpidr_el1
	bics	xzr, x0, x1, lsr #7	/* 0 = slow cluster or warm reset */
	b.eq	restore_oslock
	mov	x0, xzr
	msr	oslar_el1, x0		/* os lock stays 0 across warm reset */
	mov	x3, #3
	movz	x4, #0x8000, lsl #48
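	/*
	 * x3 = RMR_EL3.AA64 | RMR_EL3.RR (stay in AArch64, request a warm
	 * reset); x4 = bit 63 only, written to CPUACTLR_EL1 below to turn
	 * off regional clock gating (RCG).
	 */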
	msr	CORTEX_A57_CPUACTLR_EL1, x4	/* turn off RCG */
	isb
	msr	rmr_el3, x3		/* request warm reset */
	isb
	dsb	sy
1:	wfi
	b	1b

	/* --------------------------------------------------
	 * These nops are here so that speculative execution
	 * won't harm us before we are done with warm reset.
	 * --------------------------------------------------
	 */
	.rept	65
	nop
	.endr
2:
	/* --------------------------------------------------
	 * Do not insert instructions here
	 * --------------------------------------------------
	 */
#endif

	/* --------------------------------------------------
	 * Restore OS Lock bit
	 * --------------------------------------------------
	 */
restore_oslock:
	mov	x0, #1
	msr	oslar_el1, x0
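	/*
	 * Writing 1 to OSLAR_EL1 sets the OS Lock again; the warm-reset
	 * detection above keys off this bit being left clear across a
	 * warm reset.
	 */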

	/* --------------------------------------------------
	 * Get secure world's entry point and jump to it
	 * --------------------------------------------------
	 */
	bl	plat_get_my_entrypoint
	br	x0
endfunc tegra_secure_entrypoint

	.data
	.align 3

	/* --------------------------------------------------
	 * CPU Secure entry point - resume from suspend
	 * --------------------------------------------------
	 */
tegra_sec_entry_point:
	.quad	0

	/* --------------------------------------------------
	 * NS world's cold boot entry point
	 * --------------------------------------------------
	 */
ns_image_entrypoint:
	.quad	0

	/* --------------------------------------------------
	 * BL31's physical base address
	 * --------------------------------------------------
	 */
tegra_bl31_phys_base:
	.quad	0

	/* --------------------------------------------------
	 * UART controller base for console init
	 * --------------------------------------------------
	 */
tegra_console_base:
	.quad	0