xlat_tables_context.c
/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
#if PLAT_RO_XLAT_TABLES
REGISTER_XLAT_CONTEXT_RO_BASE_TABLE(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
		EL_REGIME_INVALID, "xlat_table");
#else
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
#endif

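/* Add a static region to the memory map of the default context. */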
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

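/* Add an array of static regions to the memory map of the default context. */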
void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

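/*
 * Add a static region to the default context, letting the library choose the
 * virtual address. The allocated VA is returned through base_va.
 */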
void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

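/*
 * Add an array of static regions, allocating a virtual address for each of
 * them. The array must be terminated by a zero-filled entry, detected here by
 * its granularity field being 0U.
 */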
void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

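/* Map a region that can later be removed with mmap_remove_dynamic_region(). */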
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

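/*
 * Map a removable region, letting the library choose the virtual address.
 * The allocated VA is returned through base_va.
 */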
int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

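/* Unmap a region previously mapped with one of the dynamic helpers above. */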
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					base_va, size);
}

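/*
 * Illustrative sketch of the dynamic API (not part of the library; the
 * physical address below is a made-up placeholder):
 *
 *	uintptr_t va;
 *	int rc = mmap_add_dynamic_region_alloc_va(0x1c090000ULL, &va,
 *				PAGE_SIZE, MT_DEVICE | MT_RW | MT_SECURE);
 *	if (rc == 0) {
 *		... access the device through va ...
 *		rc = mmap_remove_dynamic_region(va, PAGE_SIZE);
 *	}
 */
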
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

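/*
 * Initialise the translation tables of the default context for the
 * translation regime of the current Exception Level.
 */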
void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}
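
/*
 * Illustrative boot-time flow for an EL3 image (a sketch only; the regions an
 * image maps, the MT_* attributes and the enable_mmu_*() variant are image-
 * and architecture-specific):
 *
 *	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
 *			BL_CODE_END - BL_CODE_BASE, MT_CODE | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */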

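/*
 * Query the attributes of the page that maps base_va, or change the
 * attributes of a range of pages, in the default context.
 */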
int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}

#if PLAT_RO_XLAT_TABLES
/*
 * Change the memory attributes of the descriptors which resolve the address
 * range that belongs to the translation tables themselves, which are by
 * default mapped as part of read-write data in the BL image's memory.
 *
 * Since the translation tables map themselves via these level 3 (page)
 * descriptors, any change applied to them with the MMU on would introduce a
 * chicken-and-egg problem because of the break-before-make sequence.
 * Eventually, it would reach the descriptor that resolves the very table it
 * belongs to and the invalidation (break step) would cause the subsequent
 * write (make step) to it to generate an MMU fault. Therefore, the MMU is
 * disabled before making the change.
 *
 * No assumption is made about what data this function needs, therefore all
 * the caches are flushed in order to ensure coherency. A future optimisation
 * would be to only flush the required data to main memory.
 */
int xlat_make_tables_readonly(void)
{
	assert(tf_xlat_ctx.initialized == true);
#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		disable_mmu_el1();
	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
		disable_mmu_el3();
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
		return -1;
	}

	/* Flush all caches. */
	dcsw_op_all(DCCISW);
#else /* !__aarch64__ */
	assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
	/* On AArch32, we flush the caches before disabling the MMU. The reason
	 * for this is that the dcsw_op_all AArch32 function pushes some
	 * registers onto the stack under the assumption that it is writing to
	 * cache, which is not true with the MMU off. This would result in the
	 * stack becoming corrupted and a wrong/junk value for the LR being
	 * restored at the end of the routine.
	 */
	dcsw_op_all(DC_OP_CISW);
	disable_mmu_secure();
#endif

	int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
				(uintptr_t)tf_xlat_ctx.tables,
				tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
				MT_RO_DATA | MT_SECURE);

#ifdef __aarch64__
	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
		enable_mmu_el1(0U);
	} else {
		assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
		enable_mmu_el3(0U);
	}
#else /* !__aarch64__ */
	enable_mmu_svc_mon(0U);
#endif

	if (rc == 0) {
		tf_xlat_ctx.readonly_tables = true;
	}

	return rc;
}
#endif /* PLAT_RO_XLAT_TABLES */

/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

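/*
 * Wrappers that compute the MMU configuration for the requested translation
 * regime from the default context, store it in mmu_cfg_params and then
 * enable the MMU through the corresponding direct helper.
 */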
#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */