/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * Each platform can define the size of the virtual address space, which is
 * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
 * width of said address space. The value of TCR.TxSZ must be in the range 16
 * to 39 [1], which means that the virtual address space width must be in the
 * range 48 to 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
 * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
 * from 30 to 25. Wider or narrower address spaces are not supported. As a
 * result, level 3 cannot be used as initial lookup level with 4 KB
 * granularity. [2]
 *
 * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
 * D4-11 in the ARM ARM, the initial lookup level for an address space like
 * that is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Page 1730: 'Input address size', 'For all translation stages'.
 * [2] Section D4.2.5
 */

#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	0
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))

# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif
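
/*
 * Worked example of the selection above, assuming the usual 4 KB granule
 * shifts (L0_XLAT_ADDRESS_SHIFT = 39, L1 = 30, L2 = 21): for the 35-bit
 * address space used as an example in the comment above, the level 1 branch
 * is taken, so XLAT_TABLE_LEVEL_BASE is 1 and NUM_BASE_LEVEL_ENTRIES
 * evaluates to (1 << 35) >> 30 = 32 entries, i.e. a 256-byte base table.
 */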

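/*
 * Base translation table. The architecture requires the table pointed to by
 * TTBR to be aligned to its own size; with a reduced initial lookup level it
 * can hold fewer than 512 entries, hence the alignment below is
 * NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t) rather than a full page.
 */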
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

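/*
 * Physical address size encoding computed by init_xlat_tables() and later
 * programmed into TCR_EL1.IPS or TCR_EL3.PS by the enable_mmu_el1/el3()
 * functions generated at the bottom of this file.
 */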
static unsigned long long tcr_ps_bits;

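/*
 * Map the highest physical address accessed by the firmware to the smallest
 * TCR.PS/IPS encoding that covers it: 32 bits -> 4 GB, 36 -> 64 GB,
 * 40 -> 1 TB, 42 -> 4 TB, 44 -> 16 TB and 48 -> 256 TB.
 */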
static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

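/*
 * Return the maximum physical address supported by the hardware, as an
 * inclusive limit derived from the ID_AA64MMFR0_EL1.PARange field.
 */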
static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

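/*
 * Build the translation tables from the memory regions registered by the
 * platform (typically via mmap_add()/mmap_add_region()) and record the
 * physical address size encoding to be programmed into TCR when the MMU is
 * enabled.
 */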
void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;
	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)				\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */	\
		if (flags & XLAT_TABLE_NC) {				\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		}							\
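		/* PLAT_VIRT_ADDR_SPACE_SIZE is expected to be a power */\
		/* of two, so its count of trailing zero bits is the */	\
		/* address space width and the low bits written above */\
		/* are T0SZ = 64 - width. */				\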
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
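
/*
 * Typical call sequence from a platform's setup code (an illustrative sketch;
 * DEVICE_BASE, DEVICE_SIZE and plat_mmap are hypothetical platform symbols,
 * not part of this file):
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	mmap_add(plat_mmap);		   (remaining platform regions)
 *	init_xlat_tables();
 *	enable_mmu_el3(0);		   (flags 0: keep the data cache on)
 */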