/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

/*
 * MMU configuration register values for the active translation context. Used
 * from the MMU assembly helpers.
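 * setup_mmu_cfg() fills it in before the enable_mmu_direct_*() helpers
 * consume it.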
 */
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

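/*
 * Typical flow in a BL image (illustrative sketch only; the address, size
 * and attributes below are hypothetical, not taken from this file):
 *
 *	mmap_add_region(0x80000000ULL, 0x80000000U, 0x00100000U,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */
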
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}

void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
			      size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;
}

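/*
 * Add an array of regions, allocating a virtual address for each one. The
 * caller must leave base_va as zero and terminate the array with a
 * zero-filled region (granularity == 0U).
 */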
void mmap_add_alloc_va(mmap_region_t *mm)
{
	while (mm->granularity != 0U) {
		assert(mm->base_va == 0U);
		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
		mm++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

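/*
 * Illustrative dynamic mapping sketch (DEV_PA and DEV_VA are hypothetical
 * placeholders, not symbols defined by this library):
 *
 *	if (mmap_add_dynamic_region(DEV_PA, DEV_VA, PAGE_SIZE,
 *				    MT_DEVICE | MT_RW | MT_SECURE) == 0) {
 *		... access the device through DEV_VA ...
 *		(void)mmap_remove_dynamic_region(DEV_VA, PAGE_SIZE);
 *	}
 */
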
int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);

	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);

	*base_va = mm.base_va;

	return rc;
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
					base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

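/*
 * Initialise the default translation context: select the translation regime
 * from the exception level this image runs at, then build its tables.
 */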
void __init init_xlat_tables(void)
{
	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

	unsigned int current_el = xlat_arch_current_el();

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else if (current_el == 2U) {
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	} else {
		assert(current_el == 3U);
		tf_xlat_ctx.xlat_regime = EL3_REGIME;
	}

	init_xlat_tables_ctx(&tf_xlat_ctx);
}

int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
{
	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
}

int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
{
	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}

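/*
 * Illustrative sketch (CODE_VA is a hypothetical address): query the
 * attributes of a mapped page, then remap it as read-only, executable code.
 *
 *	uint32_t attr;
 *
 *	if (xlat_get_mem_attributes(CODE_VA, &attr) == 0) {
 *		(void)xlat_change_mem_attributes(CODE_VA, PAGE_SIZE,
 *						 MT_RO | MT_EXECUTE);
 *	}
 */
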
/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif

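/*
 * Program the saved MMU configuration for the given translation regime, then
 * turn the MMU on through the assembly helpers.
 */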
#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el2(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_el2(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}

#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address, EL2_REGIME);
	enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */