xlat_tables_utils.c 16.1 KB
Newer Older
1
2
3
4
5
6
7
8
/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
9
#include <stdbool.h>
10
#include <stdint.h>
11
#include <stdio.h>
12
13
14
15
16
17
18
19

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
20
21
22
23
24

#include "xlat_tables_private.h"

#if LOG_LEVEL < LOG_LEVEL_VERBOSE

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
25
void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
	/* Empty: mmap dumping is compiled out when LOG_LEVEL < LOG_LEVEL_VERBOSE. */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty: table dumping is compiled out when LOG_LEVEL < LOG_LEVEL_VERBOSE. */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
37
void xlat_mmap_print(const mmap_region_t *mmap)
38
{
39
	printf("mmap:\n");
40
41
42
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
43
44
45
		printf(" VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x  granularity:0x%zx\n",
		       mm->base_va, mm->base_pa, mm->size, mm->attr,
		       mm->granularity);
46
47
		++mm;
	};
48
	printf("\n");
49
50
51
52
53
}

/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
54
	uint64_t mem_type_index = ATTR_INDEX_GET(desc);
55
56
57
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
58
		printf("MEM");
59
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
60
		printf("NC");
61
62
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
63
		printf("DEV");
64
65
	}

66
67
	if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
		/* For EL3 and EL2 only check the AP[2] and XN bits. */
68
69
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
70
	} else {
71
		assert(xlat_regime == EL1_EL0_REGIME);
72
		/*
73
74
75
76
77
78
79
		 * For EL0 and EL1:
		 * - In AArch64 PXN and UXN can be set independently but in
		 *   AArch32 there is no UXN (XN affects both privilege levels).
		 *   For consistency, we set them simultaneously in both cases.
		 * - RO and RW permissions must be the same in EL1 and EL0. If
		 *   EL0 can access that memory region, so can EL1, with the
		 *   same permissions.
80
		 */
81
82
83
84
85
86
#if ENABLE_ASSERTIONS
		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
		uint64_t xn_perm = desc & xn_mask;

		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
87
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
88
		/* Only check one of PXN and UXN, the other one is the same. */
89
		printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
90
91
92
93
		/*
		 * Privileged regions can only be accessed from EL1, user
		 * regions can be accessed from EL1 and EL0.
		 */
94
		printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
95
			  ? "-USER" : "-PRIV");
96
97
	}

98
	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
99
100
101
102
103
104
105
106
107
108
109
110
111
}

/* Per-level indentation prefixes used by the table dump output. */
static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

/*
 * Format string for summarising a run of invalid descriptors.
 * Note: the identifier keeps its historical misspelling ("ommited") as it is
 * referenced elsewhere in this file.
 */
static const char *invalid_descriptors_ommited =
		"%s(%d invalid descriptors omitted)\n";

/*
112
 * Function that reads the translation tables passed as an argument
113
114
 * and prints their status.
 */
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
115
116
117
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
		const uint64_t *table_base, unsigned int table_entries,
		unsigned int level)
118
119
120
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
	/*
	 * data structure to track DESC_TABLE entry before iterate into subtable
	 * of next translation level. it will be restored after return from
	 * subtable iteration.
	 */
	struct desc_table {
		const uint64_t *table_base;
		uintptr_t table_idx_va;
		unsigned int idx;
	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
	unsigned int this_level = level;
	const uint64_t *this_base = table_base;
	unsigned int max_entries = table_entries;
	size_t level_size = XLAT_BLOCK_SIZE(this_level);
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
136
	unsigned int table_idx = 0U;
137
	uintptr_t table_idx_va = table_base_va;
138
139
140
141
142
143
144
145
146

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

147
148
149
	while (this_base != NULL) {
		/* finish current xlat level */
		if (table_idx >= max_entries) {
150
			if (invalid_row_count > 1) {
151
				printf(invalid_descriptors_ommited,
152
153
					  level_spacers[this_level],
					  invalid_row_count - 1);
154
155
156
			}
			invalid_row_count = 0;

157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
			/* no parent level to iterate. */
			if (this_level <= level) {
				this_base = NULL;
				table_idx = max_entries + 1;
			} else {
				/* retore previous DESC_TABLE entry and start
				 * to iterate.
				 */
				this_level--;
				level_size = XLAT_BLOCK_SIZE(this_level);
				this_base = desc_tables[this_level].table_base;
				table_idx = desc_tables[this_level].idx;
				table_idx_va =
					desc_tables[this_level].table_idx_va;
				if (this_level == level) {
					max_entries = table_entries;
				} else {
					max_entries = XLAT_TABLE_ENTRIES;
				}

				assert(this_base != NULL);
			}
		} else {
			uint64_t desc = this_base[table_idx];

			if ((desc & DESC_MASK) == INVALID_DESC) {
				if (invalid_row_count == 0) {
					printf("%sVA:0x%lx size:0x%zx\n",
						  level_spacers[this_level],
						  table_idx_va, level_size);
				}
				invalid_row_count++;
				table_idx++;
				table_idx_va += level_size;
			} else {
				if (invalid_row_count > 1) {
					printf(invalid_descriptors_ommited,
						  level_spacers[this_level],
						  invalid_row_count - 1);
				}
				invalid_row_count = 0;
198
				/*
199
200
201
202
				 * Check if this is a table or a block. Tables
				 * are only allowed in levels other than 3, but
				 * DESC_PAGE has the same value as DESC_TABLE,
				 * so we need to check.
203
204
				 */

205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
				if (((desc & DESC_MASK) == TABLE_DESC) &&
				    (this_level < XLAT_TABLE_LEVEL_MAX)) {
					uintptr_t addr_inner;

					/*
					 * Do not print any PA for a table
					 * descriptor, as it doesn't directly
					 * map physical memory but instead
					 * points to the next translation
					 * table in the translation table walk.
					 */
					printf("%sVA:0x%lx size:0x%zx\n",
					       level_spacers[this_level],
					       table_idx_va, level_size);

					addr_inner = desc & TABLE_ADDR_MASK;
					/* save current xlat level */
					desc_tables[this_level].table_base =
						this_base;
					desc_tables[this_level].idx =
						table_idx + 1;
					desc_tables[this_level].table_idx_va =
						table_idx_va + level_size;

					/* start iterating next level entries */
					this_base = (uint64_t *)addr_inner;
					max_entries = XLAT_TABLE_ENTRIES;
					this_level++;
					level_size =
						XLAT_BLOCK_SIZE(this_level);
					table_idx = 0U;
				} else {
					printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
					       level_spacers[this_level],
					       table_idx_va,
					       (uint64_t)(desc & TABLE_ADDR_MASK),
					       level_size);
					xlat_desc_print(ctx, desc);
					printf("\n");

					table_idx++;
					table_idx_va += level_size;

				}
249
250
251
252
253
254
255
256
			}
		}
	}
}

/* Print a summary of the context state, then dump all its tables. */
void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;
	int used_page_tables;

	/* Translate the regime into the label printed below. */
	switch (ctx->xlat_regime) {
	case EL1_EL0_REGIME:
		xlat_regime_str = "1&0";
		break;
	case EL2_REGIME:
		xlat_regime_str = "2";
		break;
	default:
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
		break;
	}

	VERBOSE("Translation tables state:\n");
	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);

	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %u\n",
		ctx->base_table_entries);

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Dynamic case: count the tables that currently map regions. */
	used_page_tables = 0;
	for (int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0) {
			++used_page_tables;
		}
	}
#else
	/* Static case: tables are handed out linearly. */
	used_page_tables = ctx->next_table;
#endif

	VERBOSE("  Used %d sub-tables out of %d (spare: %d)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

/*
 * Do a translation table walk to find the block or page descriptor that maps
 * virtual_addr.
 *
 * On success, return the address of the descriptor within the translation
 * table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 *
 * xlat_table_base
 *   Base address for the initial lookup level.
 * xlat_table_base_entries
 *   Number of entries in the translation table for the initial lookup level.
 * virt_addr_space_size
 *   Size in bytes of the virtual address space.
 */
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
314
				       unsigned int xlat_table_base_entries,
315
				       unsigned long long virt_addr_space_size,
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
316
				       unsigned int *out_level)
317
318
319
{
	unsigned int start_level;
	uint64_t *table;
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
320
	unsigned int entries;
321
322
323
324
325
326
327
328
329

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);

	table = xlat_table_base;
	entries = xlat_table_base_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
330
		uint64_t idx, desc, desc_type;
331
332
333

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
334
335
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
336
337
338
339
340
341
342
343
344
345
346
347
348
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
			/*
349
			 * Only page descriptors allowed at the final lookup
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
			 * level.
			 */
			assert(desc_type == PAGE_DESC);
			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			*out_level = level;
			return &table[idx];
		}

		assert(desc_type == TABLE_DESC);
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This shouldn't be reached, the translation table walk should end at
	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
	 */
371
	assert(false);
372
373
374
375
376

	return NULL;
}


377
378
static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
379
		unsigned long long *addr_pa, unsigned int *table_level)
380
381
382
{
	uint64_t *entry;
	uint64_t desc;
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
383
	unsigned int level;
384
385
386
387
388
389
	unsigned long long virt_addr_space_size;

	/*
	 * Sanity-check arguments.
	 */
	assert(ctx != NULL);
390
	assert(ctx->initialized);
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
391
	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
392
	       (ctx->xlat_regime == EL2_REGIME) ||
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
393
	       (ctx->xlat_regime == EL3_REGIME));
394

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
395
396
	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
	assert(virt_addr_space_size > 0U);
397
398
399
400
401
402
403

	entry = find_xlat_table_entry(base_va,
				ctx->base_table,
				ctx->base_table_entries,
				virt_addr_space_size,
				&level);
	if (entry == NULL) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
404
		WARN("Address 0x%lx is not mapped.\n", base_va);
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
		return -EINVAL;
	}

	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
425
	printf("\n");
426
427
428
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

	assert(attributes != NULL);
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
429
	*attributes = 0U;
430

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
431
	uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
432
433
434
435
436
437
438
439
440
441

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
442
	uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
443
444
445
446
447

	if (ap2_bit == AP2_RW)
		*attributes |= MT_RW;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
448
449
		uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;

450
451
452
453
		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
			*attributes |= MT_USER;
	}

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
454
	uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
455

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
456
	if (ns_bit == 1U)
457
458
459
460
461
462
463
		*attributes |= MT_NS;

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
464
		assert((desc & xn_mask) == 0U);
465
466
467
468
469
470
	}

	return 0;
}


471
472
/*
 * Public wrapper: retrieve only the attributes of the mapping at base_va.
 * Returns 0 on success, -EINVAL if the address is not mapped.
 */
int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				uint32_t *attr)
{
	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
				NULL, NULL, NULL);
}


479
480
int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size, uint32_t attr)
481
482
483
484
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
485
	assert(ctx->initialized);
486
487

	unsigned long long virt_addr_space_size =
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
488
489
		(unsigned long long)ctx->va_max_address + 1U;
	assert(virt_addr_space_size > 0U);
490
491

	if (!IS_PAGE_ALIGNED(base_va)) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
492
493
		WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
		     __func__, base_va);
494
495
496
		return -EINVAL;
	}

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
497
	if (size == 0U) {
498
499
500
501
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
502
	if ((size % PAGE_SIZE) != 0U) {
503
504
505
506
507
		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
		     __func__, size);
		return -EINVAL;
	}

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
508
	if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
509
		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
510
511
512
513
		     __func__);
		return -EINVAL;
	}

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
514
	size_t pages_count = size / PAGE_SIZE;
515

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
516
517
	VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
		pages_count, base_va);
518
519
520
521
522
523

	uintptr_t base_va_original = base_va;

	/*
	 * Sanity checks.
	 */
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
524
525
526
527
	for (size_t i = 0U; i < pages_count; ++i) {
		const uint64_t *entry;
		uint64_t desc, attr_index;
		unsigned int level;
528
529
530
531
532
533
534

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
535
			WARN("Address 0x%lx is not mapped.\n", base_va);
536
537
538
539
540
541
542
543
544
545
546
			return -EINVAL;
		}

		desc = *entry;

		/*
		 * Check that all the required pages are mapped at page
		 * granularity.
		 */
		if (((desc & DESC_MASK) != PAGE_DESC) ||
			(level != XLAT_TABLE_LEVEL_MAX)) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
547
548
			WARN("Address 0x%lx is not mapped at the right granularity.\n",
			     base_va);
549
550
551
552
553
554
555
556
			WARN("Granularity is 0x%llx, should be 0x%x.\n",
			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
			return -EINVAL;
		}

		/*
		 * If the region type is device, it shouldn't be executable.
		 */
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
557
		attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
558
		if (attr_index == ATTR_DEVICE_INDEX) {
Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
559
560
561
			if ((attr & MT_EXECUTE_NEVER) == 0U) {
				WARN("Setting device memory as executable at address 0x%lx.",
				     base_va);
562
563
564
565
566
567
568
569
570
571
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}

	/* Restore original value. */
	base_va = base_va_original;

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
572
	for (unsigned int i = 0U; i < pages_count; ++i) {
573

Antonio Nino Diaz's avatar
Antonio Nino Diaz committed
574
575
576
577
		uint32_t old_attr = 0U, new_attr;
		uint64_t *entry = NULL;
		unsigned int level = 0U;
		unsigned long long addr_pa = 0ULL;
578

579
		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
580
581
582
583
584
585
586
587
588
					    &entry, &addr_pa, &level);

		/*
		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
		 * information is ignored.
		 */

		/* Clean the old attributes so that they can be rebuilt. */
589
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
590
591
592
593
594

		/*
		 * Update attributes, but filter out the ones this function
		 * isn't allowed to change.
		 */
595
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
596
597
598
599
600
601
602

		/*
		 * The break-before-make sequence requires writing an invalid
		 * descriptor and making sure that the system sees the change
		 * before writing the new descriptor.
		 */
		*entry = INVALID_DESC;
603
604
605
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		dccvac((uintptr_t)entry);
#endif
606
		/* Invalidate any cached copy of this mapping in the TLBs. */
607
		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
608
609
610
611
612
613

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();

		/* Write new descriptor */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
614
615
616
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		dccvac((uintptr_t)entry);
#endif
617
618
619
620
621
622
623
624
		base_va += PAGE_SIZE;
	}

	/* Ensure that the last descriptor writen is seen by the system. */
	dsbish();

	return 0;
}