Arm Trusted Firmware — Commit 2ee596c4 (unverified)

Merge pull request #1493 from antonio-nino-diaz-arm/an/xlat-misra

Fix MISRA defects in xlat tables lib and SP805 driver

Authored Jul 30, 2018 by Dimitris Papastamos; committed by GitHub on Jul 30, 2018
Parents: eef90a77, 354305c3
21 changed files
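Most of the hunks below repeat a small set of MISRA C:2012-driven patterns: literals gain an explicit unsigned suffix via the U()/UL()/ULL() wrappers, bitmask tests become explicit comparisons against 0U, index and return types are aligned with what they actually hold, reserved double-underscore include guards and leading-underscore macro names are renamed, and pointers that are never written through become const. As a reference for the wrappers used throughout, here is a minimal sketch of their conventional form in utils_def.h — an assumption about the header's shape, not a quotation of it (the in-tree header also provides variants for assembly builds):

/* Sketch of the literal-suffix helpers assumed from <utils_def.h>.
 * Token pasting appends the suffix, so U(1) expands to 1U, giving the
 * literal an explicitly unsigned type as MISRA's essential-type rules
 * expect. */
#define U(_x)	(_x##U)
#define UL(_x)	(_x##UL)
#define ULL(_x)	(_x##ULL)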
drivers/arm/sp805/sp805.c

 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,17 +10,17 @@
 /* Inline register access functions */

-static inline void sp805_write_wdog_load(uintptr_t base, unsigned long value)
+static inline void sp805_write_wdog_load(uintptr_t base, uint32_t value)
 {
 	mmio_write_32(base + SP805_WDOG_LOAD_OFF, value);
 }

-static inline void sp805_write_wdog_ctrl(uintptr_t base, unsigned long value)
+static inline void sp805_write_wdog_ctrl(uintptr_t base, uint32_t value)
 {
 	mmio_write_32(base + SP805_WDOG_CTR_OFF, value);
 }

-static inline void sp805_write_wdog_lock(uintptr_t base, unsigned long value)
+static inline void sp805_write_wdog_lock(uintptr_t base, uint32_t value)
 {
 	mmio_write_32(base + SP805_WDOG_LOCK_OFF, value);
 }
@@ -28,23 +28,23 @@ static inline void sp805_write_wdog_lock(uintptr_t base, unsigned long value)
 /* Public API implementation */

-void sp805_start(uintptr_t base, unsigned long ticks)
+void sp805_start(uintptr_t base, unsigned int ticks)
 {
 	sp805_write_wdog_load(base, ticks);
 	sp805_write_wdog_ctrl(base, SP805_CTR_RESEN | SP805_CTR_INTEN);
 	/* Lock registers access */
-	sp805_write_wdog_lock(base, 0);
+	sp805_write_wdog_lock(base, 0U);
 }

 void sp805_stop(uintptr_t base)
 {
 	sp805_write_wdog_lock(base, WDOG_UNLOCK_KEY);
-	sp805_write_wdog_ctrl(base, 0);
+	sp805_write_wdog_ctrl(base, 0U);
 }

-void sp805_refresh(uintptr_t base, unsigned long ticks)
+void sp805_refresh(uintptr_t base, unsigned int ticks)
 {
 	sp805_write_wdog_lock(base, WDOG_UNLOCK_KEY);
 	sp805_write_wdog_load(base, ticks);
-	sp805_write_wdog_lock(base, 0);
+	sp805_write_wdog_lock(base, 0U);
 }
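The driver's register writes are 32-bit (mmio_write_32()), so the value parameters narrow from unsigned long to uint32_t and the tick counts to unsigned int. A hypothetical usage sketch of the resulting API follows; the base address and timeout are illustrative values, not taken from this patch:

/* Hypothetical client of the SP805 driver API changed above. */
#include <sp805.h>

#define EXAMPLE_WDOG_BASE	0x1c0f0000UL	/* assumed platform base address */

static void watchdog_demo(void)
{
	/* Arm the watchdog with the maximum load value... */
	sp805_start(EXAMPLE_WDOG_BASE, 0xffffffffU);
	/* ...kick it periodically from the main loop... */
	sp805_refresh(EXAMPLE_WDOG_BASE, 0xffffffffU);
	/* ...and disable it again before handing off. */
	sp805_stop(EXAMPLE_WDOG_BASE);
}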
include/drivers/arm/sp805.h

 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __SP805_H__
-#define __SP805_H__
+#ifndef SP805_H
+#define SP805_H
+
+#include <utils_def.h>

 /* SP805 register offset */
-#define SP805_WDOG_LOAD_OFF		0x000
-#define SP805_WDOG_CTR_OFF		0x008
-#define SP805_WDOG_LOCK_OFF		0xc00
+#define SP805_WDOG_LOAD_OFF	UL(0x000)
+#define SP805_WDOG_CTR_OFF	UL(0x008)
+#define SP805_WDOG_LOCK_OFF	UL(0xc00)

 /* Magic word to unlock the wd registers */
-#define WDOG_UNLOCK_KEY		0x1ACCE551
+#define WDOG_UNLOCK_KEY		U(0x1ACCE551)

 /* Register field definitions */
-#define SP805_CTR_RESEN		(1 << 1)
-#define SP805_CTR_INTEN		(1 << 0)
+#define SP805_CTR_RESEN		(U(1) << 1)
+#define SP805_CTR_INTEN		(U(1) << 0)

 #ifndef __ASSEMBLY__
@@ -25,10 +27,10 @@
 /* Public high level API */

-void sp805_start(uintptr_t base, unsigned long ticks);
+void sp805_start(uintptr_t base, unsigned int ticks);
 void sp805_stop(uintptr_t base);
-void sp805_refresh(uintptr_t base, unsigned long ticks);
+void sp805_refresh(uintptr_t base, unsigned int ticks);

 #endif /* __ASSEMBLY__ */

-#endif /* __SP805_H__ */
+#endif /* SP805_H */
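What the U()/UL() wrappers actually change is the type of the literal, not its value; 0x1ACCE551 fits in a 32-bit int, so unwrapped it is signed. A standalone sketch (plain C11, not from the tree) that makes the type change visible with _Generic:

#include <assert.h>

#define U(_x)	(_x##U)	/* same shape as the utils_def.h helper */

int main(void)
{
	/* The bare literal fits in int, so that is its type... */
	assert(_Generic(0x1ACCE551, int: 1, default: 0) == 1);
	/* ...while the wrapped literal is unsigned int. */
	assert(_Generic(U(0x1ACCE551), unsigned int: 1, default: 0) == 1);
	return 0;
}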
include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_AARCH32_H__
-#define __XLAT_TABLES_AARCH32_H__
+#ifndef XLAT_TABLES_AARCH32_H
+#define XLAT_TABLES_AARCH32_H

 #include <arch.h>
 #include <utils_def.h>
@@ -24,7 +24,7 @@
  * The define below specifies the first table level that allows block
  * descriptors.
  */
-#if PAGE_SIZE != (4 * 1024)
+#if PAGE_SIZE != PAGE_SIZE_4KB
 #error "Invalid granule size. AArch32 supports 4KB pages only."
 #endif
@@ -43,8 +43,8 @@
  * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
  * information, Section G4.6.5
  */
-#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (32 - TTBCR_TxSZ_MAX))
-#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (32 - TTBCR_TxSZ_MIN))
+#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(32) - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(32) - TTBCR_TxSZ_MIN))

 /*
  * Here we calculate the initial lookup level from the value of the given
@@ -66,7 +66,8 @@
  * valid. Therefore, the caller is expected to check it is the case using the
  * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
  */
-#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size)		\
-	(((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? 1 : 2)
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz)			\
+	(((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ?	\
+	U(1) : U(2))

-#endif /* __XLAT_TABLES_AARCH32_H__ */
+#endif /* XLAT_TABLES_AARCH32_H */
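To see why the ternary now yields U(1)/U(2): the macro picks the first lookup level by comparing the VA space size against the level-1 block size (1 GiB for the 4 KiB granule, i.e. L1_XLAT_ADDRESS_SHIFT == 30). A standalone sketch of that decision under those assumptions:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t l1_block = 1ULL << 30;	/* assumes L1_XLAT_ADDRESS_SHIFT == 30 */

	/* A full 4 GiB space needs a level-1 base table... */
	assert((((1ULL << 32) > l1_block) ? 1U : 2U) == 1U);
	/* ...while a 512 MiB space can start at level 2. */
	assert((((1ULL << 29) > l1_block) ? 1U : 2U) == 2U);
	return 0;
}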
include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_AARCH64_H__
-#define __XLAT_TABLES_AARCH64_H__
+#ifndef XLAT_TABLES_AARCH64_H
+#define XLAT_TABLES_AARCH64_H

 #include <arch.h>
 #include <utils_def.h>
@@ -30,9 +30,9 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
  * The define below specifies the first table level that allows block
  * descriptors.
  */
-#if PAGE_SIZE == (4 * 1024)
+#if PAGE_SIZE == PAGE_SIZE_4KB
 # define MIN_LVL_BLOCK_DESC	U(1)
-#elif PAGE_SIZE == (16 * 1024) || PAGE_SIZE == (64 * 1024)
+#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
 # define MIN_LVL_BLOCK_DESC	U(2)
 #endif
@@ -50,8 +50,8 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
  * information:
  * Page 1730: 'Input address size', 'For all translation stages'.
  */
-#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (64 - TCR_TxSZ_MAX))
-#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (64 - TCR_TxSZ_MIN))
+#define MIN_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(64) - TCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE	(ULL(1) << (U(64) - TCR_TxSZ_MIN))

 /*
  * Here we calculate the initial lookup level from the value of the given
@@ -74,10 +74,10 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
  * valid. Therefore, the caller is expected to check it is the case using the
  * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
  */
-#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size)		\
-	(((_virt_addr_space_size) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))	\
-	? 0								\
-	 : (((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
-	 ? 1 : 2))
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz)			\
+	(((_virt_addr_space_sz) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))	\
+	? 0U								\
+	 : (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
+	 ? 1U : 2U))

-#endif /* __XLAT_TABLES_AARCH64_H__ */
+#endif /* XLAT_TABLES_AARCH64_H */
include/lib/xlat_tables/xlat_mmu_helpers.h

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_MMU_HELPERS_H__
-#define __XLAT_MMU_HELPERS_H__
+#ifndef XLAT_MMU_HELPERS_H
+#define XLAT_MMU_HELPERS_H

 /*
  * The following flags are passed to enable_mmu_xxx() to override the default
@@ -52,6 +52,7 @@
 #ifndef __ASSEMBLY__

 #include <stdint.h>
 #include <sys/types.h>
+
 /*
@@ -82,4 +83,4 @@ size_t xlat_arch_get_max_supported_granule_size(void);

 #endif /* __ASSEMBLY__ */

-#endif /* __XLAT_MMU_HELPERS_H__ */
+#endif /* XLAT_MMU_HELPERS_H */
include/lib/xlat_tables/xlat_tables.h

 /*
- * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_H__
-#define __XLAT_TABLES_H__
+#ifndef XLAT_TABLES_H
+#define XLAT_TABLES_H

 #include <xlat_tables_defs.h>
@@ -92,4 +92,4 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 void mmap_add(const mmap_region_t *mm);

 #endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_H__ */
+#endif /* XLAT_TABLES_H */
include/lib/xlat_tables/xlat_tables_arch.h

 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_ARCH_H__
-#define __XLAT_TABLES_ARCH_H__
+#ifndef XLAT_TABLES_ARCH_H
+#define XLAT_TABLES_ARCH_H

 #ifdef AARCH32
 #include "aarch32/xlat_tables_aarch32.h"
@@ -40,4 +40,4 @@
 	((addr_space_size) >>						\
 		XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))

-#endif /* __XLAT_TABLES_ARCH_H__ */
+#endif /* XLAT_TABLES_ARCH_H */
include/lib/xlat_tables/xlat_tables_defs.h

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_DEFS_H__
-#define __XLAT_TABLES_DEFS_H__
+#ifndef XLAT_TABLES_DEFS_H
+#define XLAT_TABLES_DEFS_H

 #include <arch.h>
 #include <utils_def.h>
@@ -24,6 +24,10 @@
 #define TWO_MB_INDEX(x)		((x) >> TWO_MB_SHIFT)
 #define FOUR_KB_INDEX(x)	((x) >> FOUR_KB_SHIFT)

+#define PAGE_SIZE_4KB		U(4096)
+#define PAGE_SIZE_16KB		U(16384)
+#define PAGE_SIZE_64KB		U(65536)
+
 #define INVALID_DESC		U(0x0)
 /*
  * A block descriptor points to a region of memory bigger than the granule size
@@ -66,8 +70,8 @@
  */
 #define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
 #define PAGE_SIZE		(U(1) << PAGE_SIZE_SHIFT)
-#define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
-#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
+#define PAGE_SIZE_MASK		(PAGE_SIZE - U(1))
+#define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == U(0))

 #define XLAT_ENTRY_SIZE_SHIFT	U(3) /* Each MMU table entry is 8 bytes (1 << 3) */
 #define XLAT_ENTRY_SIZE		(U(1) << XLAT_ENTRY_SIZE_SHIFT)
@@ -80,7 +84,7 @@
 /* Values for number of entries in each MMU translation table */
 #define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
 #define XLAT_TABLE_ENTRIES	(U(1) << XLAT_TABLE_ENTRIES_SHIFT)
-#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
+#define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - U(1))

 /* Values to convert a memory address to an index into a translation table */
 #define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
@@ -90,9 +94,9 @@
 #define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
 		  ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))

-#define XLAT_BLOCK_SIZE(level)	((u_register_t)1 << XLAT_ADDR_SHIFT(level))
+#define XLAT_BLOCK_SIZE(level)	(UL(1) << XLAT_ADDR_SHIFT(level))
 /* Mask to get the bits used to index inside a block of a certain level */
-#define XLAT_BLOCK_MASK(level)	(XLAT_BLOCK_SIZE(level) - 1)
+#define XLAT_BLOCK_MASK(level)	(XLAT_BLOCK_SIZE(level) - UL(1))
 /* Mask to get the address bits common to a block of a certain table level*/
 #define XLAT_ADDR_MASK(level)	(~XLAT_BLOCK_MASK(level))
 /*
@@ -111,13 +115,13 @@
  * when stage 1 translations can only support one VA range.
  */
 #define AP2_SHIFT			U(0x7)
-#define AP2_RO				U(0x1)
-#define AP2_RW				U(0x0)
+#define AP2_RO				ULL(0x1)
+#define AP2_RW				ULL(0x0)

 #define AP1_SHIFT			U(0x6)
-#define AP1_ACCESS_UNPRIVILEGED		U(0x1)
-#define AP1_NO_ACCESS_UNPRIVILEGED	U(0x0)
-#define AP1_RES1			U(0x1)
+#define AP1_ACCESS_UNPRIVILEGED		ULL(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED	ULL(0x0)
+#define AP1_RES1			ULL(0x1)

 /*
  * The following definitions must all be passed to the LOWER_ATTRS() macro to
@@ -129,9 +133,9 @@
 #define AP_NO_ACCESS_UNPRIVILEGED	(AP1_NO_ACCESS_UNPRIVILEGED << 4)
 #define AP_ONE_VA_RANGE_RES1		(AP1_RES1 << 4)
 #define NS				(U(0x1) << 3)
-#define ATTR_NON_CACHEABLE_INDEX	U(0x2)
-#define ATTR_DEVICE_INDEX		U(0x1)
-#define ATTR_IWBWA_OWBWA_NTR_INDEX	U(0x0)
+#define ATTR_NON_CACHEABLE_INDEX	ULL(0x2)
+#define ATTR_DEVICE_INDEX		ULL(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX	ULL(0x0)
 #define LOWER_ATTRS(x)			(((x) & U(0xfff)) << 2)
 /* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
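The shift/mask macros above encode the usual 4 KiB-granule geometry: 512 eight-byte entries per table, so each level resolves 9 more address bits. A standalone check of the resulting shifts (the constant values assume the 4 KiB granule):

#include <assert.h>

#define PAGE_SIZE_SHIFT			12U	/* 4 KiB granule */
#define XLAT_TABLE_ENTRIES_SHIFT	9U	/* 512 entries per table */
#define XLAT_TABLE_LEVEL_MAX		3U
#define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
	((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))

int main(void)
{
	assert(XLAT_ADDR_SHIFT(3U) == 12U);	/* 4 KiB pages */
	assert(XLAT_ADDR_SHIFT(2U) == 21U);	/* 2 MiB blocks */
	assert(XLAT_ADDR_SHIFT(1U) == 30U);	/* 1 GiB blocks */
	assert(XLAT_ADDR_SHIFT(0U) == 39U);	/* 512 GiB blocks */
	return 0;
}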
include/lib/xlat_tables/xlat_tables_v2.h

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_V2_H__
-#define __XLAT_TABLES_V2_H__
+#ifndef XLAT_TABLES_V2_H
+#define XLAT_TABLES_V2_H

 #include <xlat_tables_defs.h>
 #include <xlat_tables_v2_helpers.h>
@@ -27,7 +27,7 @@
 /* Helper macro to define an mmap_region_t. */
 #define MAP_REGION(_pa, _va, _sz, _attr)				\
-	_MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+	MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)

 /* Helper macro to define an mmap_region_t with an identity mapping. */
 #define MAP_REGION_FLAT(_adr, _sz, _attr)				\
@@ -44,7 +44,7 @@
  * equivalent to the MAP_REGION() macro.
  */
 #define MAP_REGION2(_pa, _va, _sz, _attr, _gr)				\
-	_MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
+	MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)

 /*
  * Shifts and masks to access fields of an mmap attribute
@@ -163,7 +163,7 @@ typedef struct xlat_ctx xlat_ctx_t;
  */
 #define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
 			_virt_addr_space_size, _phy_addr_space_size)	\
-	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+	REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
 					(_xlat_tables_count),		\
 					(_virt_addr_space_size),	\
 					(_phy_addr_space_size),		\
@@ -183,7 +183,7 @@ typedef struct xlat_ctx xlat_ctx_t;
 #define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
 			_virt_addr_space_size, _phy_addr_space_size,	\
 			_xlat_regime, _section_name)			\
-	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+	REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
 					(_xlat_tables_count),		\
 					(_virt_addr_space_size),	\
 					(_phy_addr_space_size),		\
@@ -296,7 +296,7 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
  * translation tables are not modified by any other code while this function is
  * executing.
  */
-int change_mem_attributes(xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
+int change_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
 			  uint32_t attr);

 /*
@@ -318,4 +318,4 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
 			uint32_t *attributes);

 #endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_V2_H__ */
+#endif /* XLAT_TABLES_V2_H */
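Dropping the leading underscore from _MAP_REGION_FULL_SPEC and _REGISTER_XLAT_CONTEXT_FULL_SPEC matters because identifiers beginning with an underscore followed by an uppercase letter are reserved for the implementation in C (C11 §7.1.3). Callers go through the public wrappers either way; a hypothetical registration with illustrative counts (the platform size macros come from platform_def.h as usual):

/* Hypothetical client of the unchanged public wrapper; the context name
 * and the mmap/table counts are illustrative, not from the patch. */
#include <xlat_tables_v2.h>

REGISTER_XLAT_CONTEXT(example_ctx, 16, 8,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);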
include/lib/xlat_tables/xlat_tables_v2_helpers.h

@@ -9,10 +9,10 @@
  * used outside of this library code.
  */

-#ifndef __XLAT_TABLES_V2_HELPERS_H__
-#define __XLAT_TABLES_V2_HELPERS_H__
+#ifndef XLAT_TABLES_V2_HELPERS_H
+#define XLAT_TABLES_V2_HELPERS_H

-#ifndef __XLAT_TABLES_V2_H__
+#ifndef XLAT_TABLES_V2_H
 #error "Do not include this header file directly. Include xlat_tables_v2.h instead."
 #endif
@@ -32,7 +32,7 @@ struct mmap_region;
  * the fields of the structure but its parameter list is not guaranteed to
  * remain stable as we add members to mmap_region_t.
  */
-#define _MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)		\
+#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)			\
 	{								\
 		.base_pa = (_pa),					\
 		.base_va = (_va),					\
@@ -58,7 +58,7 @@ struct xlat_ctx {
 	 * null entry.
 	 */
 	struct mmap_region *mmap;
-	unsigned int mmap_num;
+	int mmap_num;

 	/*
 	 * Array of finer-grain translation tables.
@@ -66,7 +66,7 @@ struct xlat_ctx {
 	 * contain both level-2 and level-3 entries.
 	 */
 	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
-	unsigned int tables_num;
+	int tables_num;
 	/*
 	 * Keep track of how many regions are mapped in each table. The base
 	 * table can't be unmapped so it isn't needed to keep track of it.
@@ -75,7 +75,7 @@ struct xlat_ctx {
 	int *tables_mapped_regions;
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */

-	unsigned int next_table;
+	int next_table;

 	/*
 	 * Base translation table. It doesn't need to have the same amount of
@@ -96,7 +96,7 @@ struct xlat_ctx {
 	unsigned int base_level;

 	/* Set to 1 when the translation tables are initialized. */
-	unsigned int initialized;
+	int initialized;

 	/*
 	 * Translation regime managed by this xlat_ctx_t. It should be one of
@@ -106,27 +106,27 @@ struct xlat_ctx {
 };

 #if PLAT_XLAT_TABLES_DYNAMIC
-#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)	\
 	static int _ctx_name##_mapped_regions[_xlat_tables_count];

-#define _REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name)				\
 	.tables_mapped_regions = _ctx_name##_mapped_regions,
 #else
-#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)	\
 	/* do nothing */

-#define _REGISTER_DYNMAP_STRUCT(_ctx_name)				\
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name)				\
 	/* do nothing */
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */

-#define _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, _xlat_tables_count, \
-			_virt_addr_space_size, _phy_addr_space_size,	\
-			_xlat_regime, _section_name)			\
+#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
+			_xlat_tables_count, _virt_addr_space_size,	\
+			_phy_addr_space_size, _xlat_regime, _section_name)\
 	CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size),	\
-		assert_invalid_virtual_addr_space_size_for_##_ctx_name); \
+		assert_invalid_virtual_addr_space_size_for_##_ctx_name);\
 									\
 	CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size),	\
-		assert_invalid_physical_addr_space_sizefor_##_ctx_name); \
+		assert_invalid_physical_addr_space_sizefor_##_ctx_name);\
 									\
 	static mmap_region_t _ctx_name##_mmap[_mmap_count + 1];	\
 									\
@@ -136,30 +136,30 @@ struct xlat_ctx {
 									\
 	static uint64_t _ctx_name##_base_xlat_table			\
 		[GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)]	\
-		__aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size) \
+		__aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
 			* sizeof(uint64_t));				\
 									\
-	_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)		\
+	XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)	\
 									\
 	static xlat_ctx_t _ctx_name##_xlat_ctx = {			\
-		.va_max_address = (_virt_addr_space_size) - 1,		\
-		.pa_max_address = (_phy_addr_space_size) - 1,		\
+		.va_max_address = (_virt_addr_space_size) - 1UL,	\
+		.pa_max_address = (_phy_addr_space_size) - 1ULL,	\
 		.mmap = _ctx_name##_mmap,				\
 		.mmap_num = (_mmap_count),				\
-		.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size), \
+		.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
 		.base_table = _ctx_name##_base_xlat_table,		\
 		.base_table_entries =					\
-			GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size), \
+			GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
 		.tables = _ctx_name##_xlat_tables,			\
 		.tables_num = _xlat_tables_count,			\
-		_REGISTER_DYNMAP_STRUCT(_ctx_name)			\
+		XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name)			\
 		.xlat_regime = (_xlat_regime),				\
-		.max_pa = 0,						\
-		.max_va = 0,						\
+		.max_pa = 0U,						\
+		.max_va = 0U,						\
 		.next_table = 0,					\
 		.initialized = 0,					\
 	}

 #endif /*__ASSEMBLY__*/
-#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
+#endif /* XLAT_TABLES_V2_HELPERS_H */
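The internal helpers gain an XLAT_ prefix for the same reason: _ALLOC_DYNMAP_STRUCT and friends began with a reserved underscore-uppercase name. A minimal sketch of the renamed pattern in isolation, with a hypothetical context name not taken from the patch:

/* The renamed helper declares per-context bookkeeping storage; token
 * pasting builds the array name from the context name. */
#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
	static int _ctx_name##_mapped_regions[_xlat_tables_count];

XLAT_ALLOC_DYNMAP_STRUCT(demo, 8)
/* expands to: static int demo_mapped_regions[8]; */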
lib/xlat_tables/aarch32/xlat_tables.c

@@ -13,7 +13,7 @@
 #include <xlat_tables.h>
 #include "../xlat_tables_private.h"

-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
 #error ARMv7 target does not support LPAE MMU descriptors
 #endif
@@ -34,16 +34,16 @@ static unsigned long long get_max_supported_pa(void)
 }
 #endif /* ENABLE_ASSERTIONS */

-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
 {
 	/*
 	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
 	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
 	 */
-	return 3;
+	return 3U;
 }

-uint64_t xlat_arch_get_xn_desc(int el __unused)
+uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
 {
 	return UPPER_ATTRS(XN);
 }
@@ -53,12 +53,12 @@ void init_xlat_tables(void)
 	unsigned long long max_pa;
 	uintptr_t max_va;
 	print_mmap();
-	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
 			   &max_va, &max_pa);

-	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
-	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
-	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
+	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
 }

 /*******************************************************************************
@@ -71,7 +71,7 @@ void enable_mmu_secure(unsigned int flags)
 	uint64_t ttbr0;

 	assert(IS_IN_SECURE());
-	assert((read_sctlr() & SCTLR_M_BIT) == 0);
+	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

 	/* Set attributes in the right indices of the MAIR */
 	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -87,18 +87,18 @@ void enable_mmu_secure(unsigned int flags)
 	/*
 	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
 	 */
-	if (flags & XLAT_TABLE_NC) {
+	int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);
+
+	if ((flags & XLAT_TABLE_NC) != 0U) {
 		/* Inner & outer non-cacheable non-shareable. */
 		ttbcr = TTBCR_EAE_BIT |
 			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
-			TTBCR_RGN0_INNER_NC |
-			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+			TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
 	} else {
 		/* Inner & outer WBWA & shareable. */
 		ttbcr = TTBCR_EAE_BIT |
 			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
-			TTBCR_RGN0_INNER_WBA |
-			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
+			TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
 	}
 	ttbcr |= TTBCR_EPD1_BIT;
 	write_ttbcr(ttbcr);
@@ -106,7 +106,7 @@ void enable_mmu_secure(unsigned int flags)
 	/* Set TTBR0 bits as well */
 	ttbr0 = (uintptr_t) base_xlation_table;
 	write64_ttbr0(ttbr0);
-	write64_ttbr1(0);
+	write64_ttbr1(0U);

 	/*
 	 * Ensure all translation table writes have drained
@@ -120,7 +120,7 @@ void enable_mmu_secure(unsigned int flags)
 	sctlr = read_sctlr();
 	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

-	if (flags & DISABLE_DCACHE)
+	if ((flags & DISABLE_DCACHE) != 0U)
 		sctlr &= ~SCTLR_C_BIT;
 	else
 		sctlr |= SCTLR_C_BIT;
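Hoisting the T0SZ computation into a named int and then casting is what lets the TTBCR OR stay within one essential type. For a power-of-two VA space of size 2^n, __builtin_ctzll() (a GCC/Clang builtin) returns n, so t0sz = 32 - n. A standalone sketch of the arithmetic:

#include <assert.h>

int main(void)
{
	/* 4 GiB VA space: ctz(2^32) == 32, so T0SZ == 0. */
	int t0sz_4g = 32 - __builtin_ctzll(1ULL << 32);
	/* 1 GiB VA space: ctz(2^30) == 30, so T0SZ == 2. */
	int t0sz_1g = 32 - __builtin_ctzll(1ULL << 30);

	assert(t0sz_4g == 0);
	assert(t0sz_1g == 2);
	return 0;
}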
lib/xlat_tables/aarch64/xlat_tables.c

@@ -31,26 +31,26 @@ static unsigned long long calc_physical_addr_size_bits(
 		unsigned long long max_addr)
 {
 	/* Physical address can't exceed 48 bits */
-	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

 	/* 48 bits address */
-	if (max_addr & ADDR_MASK_44_TO_47)
+	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
 		return TCR_PS_BITS_256TB;

 	/* 44 bits address */
-	if (max_addr & ADDR_MASK_42_TO_43)
+	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
 		return TCR_PS_BITS_16TB;

 	/* 42 bits address */
-	if (max_addr & ADDR_MASK_40_TO_41)
+	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
 		return TCR_PS_BITS_4TB;

 	/* 40 bits address */
-	if (max_addr & ADDR_MASK_36_TO_39)
+	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
 		return TCR_PS_BITS_1TB;

 	/* 36 bits address */
-	if (max_addr & ADDR_MASK_32_TO_35)
+	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
 		return TCR_PS_BITS_64GB;

 	return TCR_PS_BITS_4GB;
@@ -78,21 +78,21 @@ static unsigned long long get_max_supported_pa(void)
 }
 #endif /* ENABLE_ASSERTIONS */

-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
 {
-	int el = GET_EL(read_CurrentEl());
+	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

-	assert(el > 0);
+	assert(el > 0U);

 	return el;
 }

-uint64_t xlat_arch_get_xn_desc(int el)
+uint64_t xlat_arch_get_xn_desc(unsigned int el)
 {
-	if (el == 3) {
+	if (el == 3U) {
 		return UPPER_ATTRS(XN);
 	} else {
-		assert(el == 1);
+		assert(el == 1U);
 		return UPPER_ATTRS(PXN);
 	}
 }
@@ -102,12 +102,12 @@ void init_xlat_tables(void)
 	unsigned long long max_pa;
 	uintptr_t max_va;
 	print_mmap();
-	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
+	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
 			   &max_va, &max_pa);

-	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
-	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
-	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
+	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
+	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());

 	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
 }
@@ -129,7 +129,7 @@ void init_xlat_tables(void)
 		uint32_t sctlr;					\
 								\
 		assert(IS_IN_EL(_el));				\
-		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
+		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U);\
 								\
 		/* Set attributes in the right indices of the MAIR */ \
 		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
@@ -144,16 +144,18 @@ void init_xlat_tables(void)
 								\
 		/* Set TCR bits as well. */			\
 		/* Set T0SZ to (64 - width of virtual address space) */ \
-		if (flags & XLAT_TABLE_NC) {			\
+		int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
+								\
+		if ((flags & XLAT_TABLE_NC) != 0U) {		\
 			/* Inner & outer non-cacheable non-shareable. */\
 			tcr = TCR_SH_NON_SHAREABLE |		\
 				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
-				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+				(uint64_t) t0sz;		\
 		} else {					\
 			/* Inner & outer WBWA & shareable. */	\
 			tcr = TCR_SH_INNER_SHAREABLE |		\
 				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
-				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
+				(uint64_t) t0sz;		\
 		}						\
 		tcr |= _tcr_extra;				\
 		write_tcr_el##_el(tcr);				\
@@ -172,7 +174,7 @@ void init_xlat_tables(void)
 		sctlr = read_sctlr_el##_el();			\
 		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;		\
 								\
-		if (flags & DISABLE_DCACHE)			\
+		if ((flags & DISABLE_DCACHE) != 0U)		\
 			sctlr &= ~SCTLR_C_BIT;			\
 		else						\
 			sctlr |= SCTLR_C_BIT;			\
lib/xlat_tables/xlat_tables_common.c

@@ -32,6 +32,7 @@
 #endif

 #define UNSET_DESC	~0ULL
+#define MT_UNKNOWN	~0U

 static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
 			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
@@ -55,7 +56,7 @@ void print_mmap(void)
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
 	debug_print("mmap:\n");
 	mmap_region_t *mm = mmap;
-	while (mm->size) {
+	while (mm->size != 0U) {
 		debug_print(" VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
 				(void *)mm->base_va, mm->base_pa,
 				mm->size, mm->attr);
@@ -69,46 +70,47 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 		     size_t size, unsigned int attr)
 {
 	mmap_region_t *mm = mmap;
-	mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
-	unsigned long long end_pa = base_pa + size - 1;
-	uintptr_t end_va = base_va + size - 1;
+	const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
+	unsigned long long end_pa = base_pa + size - 1U;
+	uintptr_t end_va = base_va + size - 1U;

 	assert(IS_PAGE_ALIGNED(base_pa));
 	assert(IS_PAGE_ALIGNED(base_va));
 	assert(IS_PAGE_ALIGNED(size));

-	if (!size)
+	if (size == 0U)
 		return;

 	assert(base_pa < end_pa); /* Check for overflows */
 	assert(base_va < end_va);

 	assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
-					(PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+					(PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
 	assert((base_pa + (unsigned long long)size - 1ULL) <=
-					(PLAT_PHY_ADDR_SPACE_SIZE - 1));
+					(PLAT_PHY_ADDR_SPACE_SIZE - 1U));

 #if ENABLE_ASSERTIONS

 	/* Check for PAs and VAs overlaps with all other regions */
 	for (mm = mmap; mm->size; ++mm) {

-		uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+		uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

 		/*
 		 * Check if one of the regions is completely inside the other
 		 * one.
 		 */
 		int fully_overlapped_va =
-			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
-			((mm->base_va >= base_va) && (mm_end_va <= end_va));
+			(((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
+			((mm->base_va >= base_va) && (mm_end_va <= end_va)))
+			? 1 : 0;

 		/*
 		 * Full VA overlaps are only allowed if both regions are
 		 * identity mapped (zero offset) or have the same VA to PA
 		 * offset. Also, make sure that it's not the exact same area.
 		 */
-		if (fully_overlapped_va) {
+		if (fully_overlapped_va == 1) {
 			assert((mm->base_va - mm->base_pa) ==
 			       (base_va - base_pa));
 			assert((base_va != mm->base_va) || (size != mm->size));
@@ -122,12 +124,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 			unsigned long long mm_end_pa =
 						     mm->base_pa + mm->size - 1;

-			int separated_pa =
-				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
-			int separated_va =
-				(end_va < mm->base_va) || (base_va > mm_end_va);
+			int separated_pa = ((end_pa < mm->base_pa) ||
+				(base_pa > mm_end_pa)) ? 1 : 0;
+			int separated_va = ((end_va < mm->base_va) ||
+				(base_va > mm_end_va)) ? 1 : 0;

-			assert(separated_va && separated_pa);
+			assert((separated_va == 1) && (separated_pa == 1));
 		}
 	}
@@ -136,7 +138,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 #endif /* ENABLE_ASSERTIONS */

 	/* Find correct place in mmap to insert new region */
-	while (mm->base_va < base_va && mm->size)
+	while ((mm->base_va < base_va) && (mm->size != 0U))
 		++mm;

 	/*
@@ -154,10 +156,10 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 		++mm;

 	/* Make room for new region by moving other regions up by one place */
-	memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
+	(void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

 	/* Check we haven't lost the empty sentinal from the end of the array */
-	assert(mm_last->size == 0);
+	assert(mm_last->size == 0U);

 	mm->base_pa = base_pa;
 	mm->base_va = base_va;
@@ -172,9 +174,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 void mmap_add(const mmap_region_t *mm)
 {
-	while (mm->size) {
-		mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
-		++mm;
+	const mmap_region_t *mm_cursor = mm;
+
+	while (mm_cursor->size != 0U) {
+		mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
+				mm_cursor->size, mm_cursor->attr);
+		mm_cursor++;
 	}
 }
@@ -185,7 +190,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
 	int mem_type;

 	/* Make sure that the granularity is fine enough to map this address. */
-	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

 	desc = addr_pa;
 	/*
@@ -193,8 +198,8 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
 	 * rest.
 	 */
 	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
-	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
-	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+	desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

 	/*
 	 * Always set the access flag, as this library assumes access flag
@@ -239,7 +244,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
 	 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
 	 * attribute to figure out the value of the XN bit.
 	 */
-	if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+	if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
 		desc |= execute_never_mask;
 	}
@@ -253,9 +258,9 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
 	debug_print((mem_type == MT_MEMORY) ? "MEM" :
 		((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
-	debug_print(attr & MT_RW ? "-RW" : "-RO");
-	debug_print(attr & MT_NS ? "-NS" : "-S");
-	debug_print(attr & MT_EXECUTE_NEVER ? "-XN" : "-EXEC");
+	debug_print(((attr & MT_RW) != 0U) ? "-RW" : "-RO");
+	debug_print(((attr & MT_NS) != 0U) ? "-NS" : "-S");
+	debug_print(((attr & MT_EXECUTE_NEVER) != 0U) ? "-XN" : "-EXEC");

 	return desc;
 }
@@ -265,14 +270,14 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
  *
  * On success, this function returns 0.
  * If there are partial overlaps (meaning that a smaller size is needed) or if
- * the region can't be found in the given area, it returns -1. In this case the
- * value pointed by attr should be ignored by the caller.
+ * the region can't be found in the given area, it returns MT_UNKNOWN. In this
+ * case the value pointed by attr should be ignored by the caller.
  */
-static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
-					size_t size, unsigned int *attr)
+static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
+				     size_t size, unsigned int *attr)
 {
 	/* Don't assume that the area is contained in the first region */
-	int ret = -1;
+	unsigned int ret = MT_UNKNOWN;

 	/*
 	 * Get attributes from last (innermost) region that contains the
@@ -289,26 +294,26 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
 	 * in region 2. The loop shouldn't stop at region 2 as inner regions
 	 * have priority over outer regions, it should stop at region 5.
 	 */
-	for (;; ++mm) {
-		if (!mm->size)
+	for ( ; ; ++mm) {
+		if (mm->size == 0U)
 			return ret; /* Reached end of list */

-		if (mm->base_va > base_va + size - 1)
+		if (mm->base_va > (base_va + size - 1U))
 			return ret; /* Next region is after area so end */

-		if (mm->base_va + mm->size - 1 < base_va)
+		if ((mm->base_va + mm->size - 1U) < base_va)
 			continue; /* Next region has already been overtaken */

-		if (!ret && mm->attr == *attr)
+		if ((ret == 0U) && (mm->attr == *attr))
 			continue; /* Region doesn't override attribs so skip */

-		if (mm->base_va > base_va ||
-			mm->base_va + mm->size - 1 < base_va + size - 1)
-			return -1; /* Region doesn't fully cover our area */
+		if ((mm->base_va > base_va) ||
+			((mm->base_va + mm->size - 1U) < (base_va + size - 1U)))
+			return MT_UNKNOWN; /* Region doesn't fully cover area */

 		*attr = mm->attr;
-		ret = 0;
+		ret = 0U;
 	}
 	return ret;
 }
@@ -318,7 +323,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		uint64_t *table, unsigned int level)
 {
-	assert(level >= XLAT_TABLE_LEVEL_MIN && level <= XLAT_TABLE_LEVEL_MAX);
+	assert((level >= XLAT_TABLE_LEVEL_MIN) &&
+	       (level <= XLAT_TABLE_LEVEL_MAX));

 	unsigned int level_size_shift =
 		       L0_XLAT_ADDRESS_SHIFT - level * XLAT_TABLE_ENTRIES_SHIFT;
@@ -331,10 +337,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 	do {
 		uint64_t desc = UNSET_DESC;

-		if (!mm->size) {
+		if (mm->size == 0U) {
 			/* Done mapping regions; finish zeroing the table */
 			desc = INVALID_DESC;
-		} else if (mm->base_va + mm->size - 1 < base_va) {
+		} else if ((mm->base_va + mm->size - 1U) < base_va) {
 			/* This area is after the region so get next region */
 			++mm;
 			continue;
@@ -343,7 +349,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		debug_print("%s VA:%p size:0x%llx ", get_level_spacer(level),
 			(void *)base_va, (unsigned long long)level_size);

-		if (mm->base_va > base_va + level_size - 1) {
+		if (mm->base_va > (base_va + level_size - 1U)) {
 			/* Next region is after this area. Nothing to map yet */
 			desc = INVALID_DESC;
 		/* Make sure that the current level allows block descriptors */
@@ -354,9 +360,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 			 * it will return the innermost region's attributes.
 			 */
 			unsigned int attr;
-			int r = mmap_region_attr(mm, base_va, level_size, &attr);
+			unsigned int r = mmap_region_attr(mm, base_va,
+							  level_size, &attr);

-			if (!r) {
+			if (r == 0U) {
 				desc = mmap_desc(attr,
 					base_va - mm->base_va + mm->base_pa,
 					level);
@@ -365,13 +372,15 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,

 		if (desc == UNSET_DESC) {
 			/* Area not covered by a region so need finer table */
-			uint64_t *new_table = xlat_tables[next_xlat++];
+			uint64_t *new_table = xlat_tables[next_xlat];
+
+			next_xlat++;
 			assert(next_xlat <= MAX_XLAT_TABLES);
 			desc = TABLE_DESC | (uintptr_t)new_table;

 			/* Recurse to fill in new table */
 			mm = init_xlation_table_inner(mm, base_va,
-						new_table, level+1);
+						new_table, level + 1U);
 		}

 		debug_print("\n");
@@ -379,7 +388,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
 		*table++ = desc;
 		base_va += level_size;
-	} while ((base_va & level_index_mask) &&
-		 (base_va - 1 < PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+	} while ((base_va & level_index_mask) &&
+		 ((base_va - 1U) < (PLAT_VIRT_ADDR_SPACE_SIZE - 1U)));

 	return mm;
 }
@@ -388,15 +397,15 @@ void init_xlation_table(uintptr_t base_va, uint64_t *table,
 		   unsigned int level, uintptr_t *max_va,
 		   unsigned long long *max_pa)
 {
-	int el = xlat_arch_current_el();
+	unsigned int el = xlat_arch_current_el();

 	execute_never_mask = xlat_arch_get_xn_desc(el);

-	if (el == 3) {
+	if (el == 3U) {
 		ap1_mask = LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
 	} else {
-		assert(el == 1);
-		ap1_mask = 0;
+		assert(el == 1U);
+		ap1_mask = 0ULL;
 	}

 	init_xlation_table_inner(mmap, base_va, table, level);
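mmap_region_attr() previously mixed a -1 failure code into otherwise unsigned logic; it now returns unsigned int with ~0U (MT_UNKNOWN) as the sentinel, and callers compare against 0U instead of writing !r. A minimal caller-side sketch of the convention, with hypothetical names standing in for the library function:

#include <stdio.h>

#define MT_UNKNOWN	(~0U)	/* sentinel, as defined in the patch */

/* Hypothetical stand-in for mmap_region_attr(): returns 0U on success
 * and MT_UNKNOWN when no region fully covers the queried area. */
static unsigned int lookup_attr(int found, unsigned int *attr)
{
	if (found == 0)
		return MT_UNKNOWN;
	*attr = 0x5U;	/* illustrative attribute bits */
	return 0U;
}

int main(void)
{
	unsigned int attr;

	if (lookup_attr(1, &attr) == 0U)	/* explicit test, not "!r" */
		printf("attr = 0x%x\n", attr);
	return 0;
}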
lib/xlat_tables/xlat_tables_private.h

 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H

 #include <cassert.h>
 #include <platform_def.h>
@@ -44,17 +44,17 @@ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(PLAT_PHY_ADDR_SPACE_SIZE),
 void print_mmap(void);

 /* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);

 /*
  * Returns the bit mask that has to be ORed to the rest of a translation table
  * descriptor so that execution of code is prohibited at the given Exception
  * Level.
  */
-uint64_t xlat_arch_get_xn_desc(int el);
+uint64_t xlat_arch_get_xn_desc(unsigned int el);

 void init_xlation_table(uintptr_t base_va, uint64_t *table,
 			unsigned int level, uintptr_t *max_va,
 			unsigned long long *max_pa);

-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c

@@ -14,7 +14,7 @@
 #include <xlat_tables_v2.h>
 #include "../xlat_tables_private.h"

-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
 #error ARMv7 target does not support LPAE MMU descriptors
 #endif
@@ -27,12 +27,12 @@ int xlat_arch_is_granule_size_supported(size_t size)
 	 * The library uses the long descriptor translation table format, which
 	 * supports 4 KiB pages only.
 	 */
-	return (size == (4U * 1024U));
+	return (size == PAGE_SIZE_4KB) ? 1 : 0;
 }

 size_t xlat_arch_get_max_supported_granule_size(void)
 {
-	return 4U * 1024U;
+	return PAGE_SIZE_4KB;
 }

 #if ENABLE_ASSERTIONS
@@ -90,7 +90,7 @@ void xlat_arch_tlbi_va_sync(void)
 	isb();
 }

-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
 {
 	/*
 	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
@@ -100,7 +100,7 @@ int xlat_arch_current_el(void)
 	 * in AArch64 except for the XN bits, but we set and unset them at the
 	 * same time, so there's no difference in practice.
 	 */
-	return 1;
+	return 1U;
 }

 /*******************************************************************************
@@ -143,20 +143,23 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 	 * 32 bits.
 	 */
 	if (max_va != UINT32_MAX) {
-		uintptr_t virtual_addr_space_size = max_va + 1;
+		uintptr_t virtual_addr_space_size = max_va + 1U;
+
 		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
 		/*
 		 * __builtin_ctzll(0) is undefined but here we are guaranteed
 		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
 		 */
-		ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
+		int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
+
+		ttbcr |= (uint32_t) t0sz;
 	}

 	/*
 	 * Set the cacheability and shareability attributes for memory
 	 * associated with translation table walks using TTBR0.
 	 */
-	if (flags & XLAT_TABLE_NC) {
+	if ((flags & XLAT_TABLE_NC) != 0U) {
 		/* Inner & outer non-cacheable non-shareable. */
 		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
 			TTBCR_RGN0_INNER_NC;
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c

@@ -20,58 +20,58 @@ int xlat_arch_is_granule_size_supported(size_t size)
 {
 	u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();

-	if (size == (4U * 1024U)) {
-		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+	if (size == PAGE_SIZE_4KB) {
+		return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
 			ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
-			ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
-	} else if (size == (16U * 1024U)) {
-		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+			ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED) ? 1 : 0;
+	} else if (size == PAGE_SIZE_16KB) {
+		return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
 			ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
-			ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
-	} else if (size == (64U * 1024U)) {
-		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+			ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED) ? 1 : 0;
+	} else if (size == PAGE_SIZE_64KB) {
+		return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
 			ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
-			ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+			ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED) ? 1 : 0;
+	} else {
+		return 0;
 	}
-
-	return 0;
 }

 size_t xlat_arch_get_max_supported_granule_size(void)
 {
-	if (xlat_arch_is_granule_size_supported(64U * 1024U)) {
-		return 64U * 1024U;
-	} else if (xlat_arch_is_granule_size_supported(16U * 1024U)) {
-		return 16U * 1024U;
+	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB) != 0) {
+		return PAGE_SIZE_64KB;
+	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB) != 0) {
+		return PAGE_SIZE_16KB;
 	} else {
-		assert(xlat_arch_is_granule_size_supported(4U * 1024U));
-		return 4U * 1024U;
+		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB) != 0);
+		return PAGE_SIZE_4KB;
 	}
 }

 unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
 {
 	/* Physical address can't exceed 48 bits */
-	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

 	/* 48 bits address */
-	if (max_addr & ADDR_MASK_44_TO_47)
+	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
 		return TCR_PS_BITS_256TB;

 	/* 44 bits address */
-	if (max_addr & ADDR_MASK_42_TO_43)
+	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
 		return TCR_PS_BITS_16TB;

 	/* 42 bits address */
-	if (max_addr & ADDR_MASK_40_TO_41)
+	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
 		return TCR_PS_BITS_4TB;

 	/* 40 bits address */
-	if (max_addr & ADDR_MASK_36_TO_39)
+	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
 		return TCR_PS_BITS_1TB;

 	/* 36 bits address */
-	if (max_addr & ADDR_MASK_32_TO_35)
+	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
 		return TCR_PS_BITS_64GB;

 	return TCR_PS_BITS_4GB;
@@ -102,12 +102,12 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
 int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 {
 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
-		assert(xlat_arch_current_el() >= 1);
-		return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+		assert(xlat_arch_current_el() >= 1U);
+		return ((read_sctlr_el1() & SCTLR_M_BIT) != 0U) ? 1 : 0;
 	} else {
 		assert(ctx->xlat_regime == EL3_REGIME);
-		assert(xlat_arch_current_el() >= 3);
-		return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+		assert(xlat_arch_current_el() >= 3U);
+		return ((read_sctlr_el3() & SCTLR_M_BIT) != 0U) ? 1 : 0;
 	}
 }
@@ -137,11 +137,11 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
 	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
 	 */
 	if (xlat_regime == EL1_EL0_REGIME) {
-		assert(xlat_arch_current_el() >= 1);
+		assert(xlat_arch_current_el() >= 1U);
 		tlbivaae1is(TLBI_ADDR(va));
 	} else {
 		assert(xlat_regime == EL3_REGIME);
-		assert(xlat_arch_current_el() >= 3);
+		assert(xlat_arch_current_el() >= 3U);
 		tlbivae3is(TLBI_ADDR(va));
 	}
 }
@@ -169,11 +169,11 @@ void xlat_arch_tlbi_va_sync(void)
 	isb();
 }

-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
 {
-	int el = GET_EL(read_CurrentEl());
+	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

-	assert(el > 0);
+	assert(el > 0U);

 	return el;
 }
@@ -194,22 +194,24 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 	 * Limit the input address ranges and memory region sizes translated
 	 * using TTBR0 to the given virtual address space size.
 	 */
 	assert(max_va < ((uint64_t)UINTPTR_MAX));

-	virtual_addr_space_size = max_va + 1;
+	virtual_addr_space_size = (uintptr_t)max_va + 1U;
 	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));

 	/*
 	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
 	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
 	 */
-	tcr = (uint64_t)64 - __builtin_ctzll(virtual_addr_space_size);
+	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+	tcr = (uint64_t)t0sz;

 	/*
 	 * Set the cacheability and shareability attributes for memory
 	 * associated with translation table walks.
 	 */
-	if ((flags & XLAT_TABLE_NC) != 0) {
+	if ((flags & XLAT_TABLE_NC) != 0U) {
 		/* Inner & outer non-cacheable non-shareable. */
 		tcr |= TCR_SH_NON_SHAREABLE |
 			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
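The granule probe reads the 4-bit TGRANx fields of ID_AA64MMFR0_EL1 and compares them with the architecture's "supported" encodings; the patch only parenthesizes the comparison and makes the int result explicit with ? 1 : 0. A standalone sketch of the field extraction, using an assumed register value instead of a real system-register read (the shift and "supported" encoding shown are the architectural ones for the 4 KiB field):

#include <assert.h>
#include <stdint.h>

#define TGRAN4_SHIFT		28U
#define TGRAN4_MASK		0xfULL
#define TGRAN4_SUPPORTED	0x0ULL	/* 0b0000 == 4 KiB granule supported */

int main(void)
{
	uint64_t id_aa64mmfr0_el1 = 0x0ULL;	/* assumed CPU ID register value */
	int supported = (((id_aa64mmfr0_el1 >> TGRAN4_SHIFT) & TGRAN4_MASK) ==
			 TGRAN4_SUPPORTED) ? 1 : 0;

	assert(supported == 1);
	return 0;
}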
lib/xlat_tables_v2/xlat_tables_context.c

@@ -78,12 +78,12 @@ void init_xlat_tables(void)
 {
 	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);

-	int current_el = xlat_arch_current_el();
+	unsigned int current_el = xlat_arch_current_el();

-	if (current_el == 1) {
+	if (current_el == 1U) {
 		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
 	} else {
-		assert(current_el == 3);
+		assert(current_el == 3U);
 		tf_xlat_ctx.xlat_regime = EL3_REGIME;
 	}
lib/xlat_tables_v2/xlat_tables_core.c
View file @
2ee596c4
...
...
@@ -29,9 +29,9 @@
* Returns the index of the array corresponding to the specified translation
* table.
*/
static
int
xlat_table_get_index
(
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
static
int
xlat_table_get_index
(
const
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
{
for
(
unsigned
int
i
=
0
;
i
<
ctx
->
tables_num
;
i
++
)
for
(
int
i
=
0
;
i
<
ctx
->
tables_num
;
i
++
)
if
(
ctx
->
tables
[
i
]
==
table
)
return
i
;
...
...
@@ -45,9 +45,9 @@ static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
}
/* Returns a pointer to an empty translation table. */
static
uint64_t
*
xlat_table_get_empty
(
xlat_ctx_t
*
ctx
)
static
uint64_t
*
xlat_table_get_empty
(
const
xlat_ctx_t
*
ctx
)
{
for
(
unsigned
int
i
=
0
;
i
<
ctx
->
tables_num
;
i
++
)
for
(
int
i
=
0
;
i
<
ctx
->
tables_num
;
i
++
)
if
(
ctx
->
tables_mapped_regions
[
i
]
==
0
)
return
ctx
->
tables
[
i
];
...
...
@@ -55,21 +55,28 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
}
/* Increments region count for a given table. */
static
void
xlat_table_inc_regions_count
(
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
static
void
xlat_table_inc_regions_count
(
const
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
{
ctx
->
tables_mapped_regions
[
xlat_table_get_index
(
ctx
,
table
)]
++
;
int
idx
=
xlat_table_get_index
(
ctx
,
table
);
ctx
->
tables_mapped_regions
[
idx
]
++
;
}
/* Decrements region count for a given table. */
static
void
xlat_table_dec_regions_count
(
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
static
void
xlat_table_dec_regions_count
(
const
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
{
ctx
->
tables_mapped_regions
[
xlat_table_get_index
(
ctx
,
table
)]
--
;
int
idx
=
xlat_table_get_index
(
ctx
,
table
);
ctx
->
tables_mapped_regions
[
idx
]
--
;
}
/* Returns 0 if the specified table isn't empty, otherwise 1. */
static
int
xlat_table_is_empty
(
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
static
int
xlat_table_is_empty
(
const
xlat_ctx_t
*
ctx
,
const
uint64_t
*
table
)
{
return
!
ctx
->
tables_mapped_regions
[
xlat_table_get_index
(
ctx
,
table
)];
return
(
ctx
->
tables_mapped_regions
[
xlat_table_get_index
(
ctx
,
table
)]
==
0
)
?
1
:
0
;
}
#else
/* PLAT_XLAT_TABLES_DYNAMIC */
...
...
@@ -88,13 +95,13 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t
xlat_desc
(
const
xlat_ctx_t
*
ctx
,
uint32_t
attr
,
unsigned
long
long
addr_pa
,
int
level
)
unsigned
long
long
addr_pa
,
unsigned
int
level
)
{
uint64_t
desc
;
int
mem_type
;
u
int
32_t
mem_type
;
/* Make sure that the granularity is fine enough to map this address. */
assert
((
addr_pa
&
XLAT_BLOCK_MASK
(
level
))
==
0
);
assert
((
addr_pa
&
XLAT_BLOCK_MASK
(
level
))
==
0
U
);
desc
=
addr_pa
;
/*
...
...
@@ -111,8 +118,8 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* Deduce other fields of the descriptor based on the MT_NS and MT_RW
* memory region attributes.
*/
desc
|=
(
attr
&
MT_NS
)
?
LOWER_ATTRS
(
NS
)
:
0
;
desc
|=
(
attr
&
MT_RW
)
?
LOWER_ATTRS
(
AP_RW
)
:
LOWER_ATTRS
(
AP_RO
);
desc
|=
(
(
attr
&
MT_NS
)
!=
0U
)
?
LOWER_ATTRS
(
NS
)
:
0
U
;
desc
|=
(
(
attr
&
MT_RW
)
!=
0U
)
?
LOWER_ATTRS
(
AP_RW
)
:
LOWER_ATTRS
(
AP_RO
);
/*
* Do not allow unprivileged access when the mapping is for a privileged
...
...
@@ -120,7 +127,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
*/
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
if
(
attr
&
MT_USER
)
{
if
(
(
attr
&
MT_USER
)
!=
0U
)
{
/* EL0 mapping requested, so we give User access */
desc
|=
LOWER_ATTRS
(
AP_ACCESS_UNPRIVILEGED
);
}
else
{
...
...
@@ -172,7 +179,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* translation regime and the policy applied in
* xlat_arch_regime_get_xn_desc().
*/
if
((
attr
&
MT_RW
)
||
(
attr
&
MT_EXECUTE_NEVER
))
{
if
((
(
attr
&
MT_RW
)
!=
0U
)
||
(
(
attr
&
MT_EXECUTE_NEVER
)
!=
0U
)
)
{
desc
|=
xlat_arch_regime_get_xn_desc
(
ctx
->
xlat_regime
);
}
...
...
@@ -223,10 +230,10 @@ typedef enum {
static
void
xlat_tables_unmap_region
(
xlat_ctx_t
*
ctx
,
mmap_region_t
*
mm
,
const
uintptr_t
table_base_va
,
uint64_t
*
const
table_base
,
const
int
table_entries
,
const
unsigned
int
table_entries
,
const
unsigned
int
level
)
{
assert
(
level
>=
ctx
->
base_level
&&
level
<=
XLAT_TABLE_LEVEL_MAX
);
assert
(
(
level
>=
ctx
->
base_level
)
&&
(
level
<=
XLAT_TABLE_LEVEL_MAX
)
)
;
uint64_t
*
subtable
;
uint64_t
desc
;
...
...
@@ -234,16 +241,16 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
uintptr_t
table_idx_va
;
uintptr_t
table_idx_end_va
;
/* End VA of this entry */
uintptr_t
region_end_va
=
mm
->
base_va
+
mm
->
size
-
1
;
uintptr_t
region_end_va
=
mm
->
base_va
+
mm
->
size
-
1
U
;
int
table_idx
;
unsigned
int
table_idx
;
if
(
mm
->
base_va
>
table_base_va
)
{
/* Find the first index of the table affected by the region. */
table_idx_va
=
mm
->
base_va
&
~
XLAT_BLOCK_MASK
(
level
);
table_idx
=
(
table_idx_va
-
table_base_va
)
>>
XLAT_ADDR_SHIFT
(
level
);
table_idx
=
(
unsigned
int
)(
(
table_idx_va
-
table_base_va
)
>>
XLAT_ADDR_SHIFT
(
level
)
)
;
assert
(
table_idx
<
table_entries
);
}
else
{
...
...
@@ -254,19 +261,18 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
while
(
table_idx
<
table_entries
)
{
table_idx_end_va
=
table_idx_va
+
XLAT_BLOCK_SIZE
(
level
)
-
1
;
table_idx_end_va
=
table_idx_va
+
XLAT_BLOCK_SIZE
(
level
)
-
1
U
;
desc
=
table_base
[
table_idx
];
uint64_t
desc_type
=
desc
&
DESC_MASK
;
action_t
action
=
ACTION_NONE
;
action_t
action
;
if
((
mm
->
base_va
<=
table_idx_va
)
&&
(
region_end_va
>=
table_idx_end_va
))
{
/* Region covers all block */
if
(
level
==
3
)
{
if
(
level
==
3
U
)
{
/*
* Last level, only page descriptors allowed,
* erase it.
...
...
@@ -293,7 +299,6 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
}
else
if
((
mm
->
base_va
<=
table_idx_end_va
)
||
(
region_end_va
>=
table_idx_va
))
{
/*
* Region partially covers block.
*
...
...
@@ -302,12 +307,13 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
* There must be a table descriptor here, if not there
* was a problem when mapping the region.
*/
assert
(
level
<
3
);
assert
(
level
<
3U
);
assert
(
desc_type
==
TABLE_DESC
);
action
=
ACTION_RECURSE_INTO_TABLE
;
}
else
{
/* The region doesn't cover the block at all */
action
=
ACTION_NONE
;
}
if
(
action
==
ACTION_WRITE_BLOCK_ENTRY
)
{
...
...
@@ -322,12 +328,12 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
/* Recurse to write into subtable */
xlat_tables_unmap_region
(
ctx
,
mm
,
table_idx_va
,
subtable
,
XLAT_TABLE_ENTRIES
,
level
+
1
);
level
+
1
U
);
/*
* If the subtable is now empty, remove its reference.
*/
if
(
xlat_table_is_empty
(
ctx
,
subtable
))
{
if
(
xlat_table_is_empty
(
ctx
,
subtable
)
!=
0
)
{
table_base
[
table_idx
]
=
INVALID_DESC
;
xlat_arch_tlbi_va
(
table_idx_va
,
ctx
->
xlat_regime
);
...
...
@@ -356,12 +362,12 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
  * specified region.
  */
 static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
-		const int desc_type, const unsigned long long dest_pa,
-		const uintptr_t table_entry_base_va,
-		const unsigned int level)
+		unsigned int desc_type, unsigned long long dest_pa,
+		uintptr_t table_entry_base_va, unsigned int level)
 {
-	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
 	uintptr_t table_entry_end_va =
-			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
+			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;

 	/*
 	 * The descriptor types allowed depend on the current table level.

@@ -378,7 +384,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
 	 * translation with this granularity in principle.
 	 */
-	if (level == 3) {
+	if (level == 3U) {
 		/*
 		 * Last level, only page descriptors are allowed.
 		 */

@@ -416,8 +422,8 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
 			 * Also, check if the current level allows block
 			 * descriptors. If not, create a table instead.
 			 */
-			if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
-			    (level < MIN_LVL_BLOCK_DESC) ||
+			if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U) ||
+			    (level < MIN_LVL_BLOCK_DESC) ||
 			    (mm->granularity < XLAT_BLOCK_SIZE(level)))
 				return ACTION_CREATE_NEW_TABLE;
 			else

@@ -449,7 +455,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
 	 * mmap region failed to detect that PA and VA must at least be
 	 * aligned to PAGE_SIZE.
 	 */
-	assert(level < 3);
+	assert(level < 3U);

 	if (desc_type == INVALID_DESC) {
 		/*

@@ -472,13 +478,14 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
 			 */
 			return ACTION_RECURSE_INTO_TABLE;
 		}
 	} else {
 		/*
-		 * This table entry is outside of the region specified in the arguments,
-		 * don't write anything to it.
+		 * This table entry is outside of the region specified in the
+		 * arguments, don't write anything to it.
 		 */
 		return ACTION_NONE;
 	}
 }

@@ -488,14 +495,14 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
  * should have been mapped.
  */
 static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
-				   const uintptr_t table_base_va,
+				   uintptr_t table_base_va,
 				   uint64_t *const table_base,
-				   const int table_entries,
-				   const unsigned int level)
+				   unsigned int table_entries,
+				   unsigned int level)
 {
-	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));

-	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

 	uintptr_t table_idx_va;
 	unsigned long long table_idx_pa;

@@ -503,20 +510,20 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 	uint64_t *subtable;
 	uint64_t desc;

-	int table_idx;
+	unsigned int table_idx;

 	if (mm->base_va > table_base_va) {
 		/* Find the first index of the table affected by the region. */
 		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

-		table_idx = (table_idx_va - table_base_va) >>
-			    XLAT_ADDR_SHIFT(level);
+		table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+			    XLAT_ADDR_SHIFT(level));

 		assert(table_idx < table_entries);
 	} else {
 		/* Start from the beginning of the table. */
 		table_idx_va = table_base_va;
-		table_idx = 0;
+		table_idx = 0U;
 	}

 #if PLAT_XLAT_TABLES_DYNAMIC

@@ -531,7 +538,8 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

 		action_t action = xlat_tables_map_region_action(mm,
-			desc & DESC_MASK, table_idx_pa, table_idx_va, level);
+			(uint32_t)(desc & DESC_MASK), table_idx_pa,
+			table_idx_va, level);

 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
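The new (uint32_t) cast in the call above makes an intentional narrowing explicit: desc is a 64-bit descriptor, while the callee now takes the descriptor type as unsigned int, and MISRA C:2012 Rule 10.3 forbids assigning to a narrower essential type without a cast. A hedged sketch of the same idea, with an illustrative mask rather than the real DESC_MASK:

    #include <stdint.h>

    #define EXAMPLE_DESC_MASK 0x3ULL /* illustrative; not the real DESC_MASK */

    /* The low bits of a 64-bit descriptor identify its type; narrowing them
     * to 32 bits is deliberate, so the conversion is written out. */
    static uint32_t desc_type(uint64_t desc)
    {
        return (uint32_t)(desc & EXAMPLE_DESC_MASK);
    }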
@@ -540,6 +548,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 				level);
 		} else if (action == ACTION_CREATE_NEW_TABLE) {
+			uintptr_t end_va;

 			subtable = xlat_table_get_empty(ctx);
 			if (subtable == NULL) {

@@ -551,20 +560,23 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

 			/* Recurse to write into subtable */
-			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
-					       subtable, XLAT_TABLE_ENTRIES,
-					       level + 1);
-			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1U);
+			if (end_va !=
+				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
 				return end_va;

 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+			uintptr_t end_va;

 			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

 			/* Recurse to write into subtable */
-			uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
-					       subtable, XLAT_TABLE_ENTRIES,
-					       level + 1);
-			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1U);
+			if (end_va !=
+				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
 				return end_va;

 		} else {

@@ -581,7 +593,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			break;
 	}

-	return table_idx_va - 1;
+	return table_idx_va - 1U;
 }

@@ -593,23 +605,23 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
  * ENOMEM: There is not enough memory in the mmap array.
  * EPERM: Region overlaps another one in an invalid way.
  */
-static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
 	unsigned long long base_pa = mm->base_pa;
 	uintptr_t base_va = mm->base_va;
 	size_t size = mm->size;
 	size_t granularity = mm->granularity;

-	unsigned long long end_pa = base_pa + size - 1;
-	uintptr_t end_va = base_va + size - 1;
+	unsigned long long end_pa = base_pa + size - 1U;
+	uintptr_t end_va = base_va + size - 1U;

 	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
 			!IS_PAGE_ALIGNED(size))
 		return -EINVAL;

-	if ((granularity != XLAT_BLOCK_SIZE(1)) &&
-		(granularity != XLAT_BLOCK_SIZE(2)) &&
-		(granularity != XLAT_BLOCK_SIZE(3))) {
+	if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+		(granularity != XLAT_BLOCK_SIZE(2U)) &&
+		(granularity != XLAT_BLOCK_SIZE(3U))) {
 		return -EINVAL;
 	}

@@ -624,26 +636,26 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
 		return -ERANGE;

 	/* Check that there is space in the ctx->mmap array */
-	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
+	if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
 		return -ENOMEM;

 	/* Check for PAs and VAs overlaps with all other regions */
-	for (mmap_region_t *mm_cursor = ctx->mmap; mm_cursor->size;
-	     ++mm_cursor) {
+	for (const mmap_region_t *mm_cursor = ctx->mmap;
+	     mm_cursor->size != 0U; ++mm_cursor) {

 		uintptr_t mm_cursor_end_va = mm_cursor->base_va
-							+ mm_cursor->size - 1;
+							+ mm_cursor->size - 1U;

 		/*
 		 * Check if one of the regions is completely inside the other
 		 * one.
 		 */
 		int fully_overlapped_va =
-			((base_va >= mm_cursor->base_va) &&
-					(end_va <= mm_cursor_end_va)) ||
-			((mm_cursor->base_va >= base_va) &&
-					(mm_cursor_end_va <= end_va));
+			((((base_va >= mm_cursor->base_va) &&
+					(end_va <= mm_cursor_end_va)) ||
+			((mm_cursor->base_va >= base_va) &&
+					(mm_cursor_end_va <= end_va)))
+			? 1 : 0;

 		/*
 		 * Full VA overlaps are only allowed if both regions are

@@ -651,11 +663,11 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
 		 * offset. Also, make sure that it's not the exact same area.
 		 * This can only be done with static regions.
 		 */
-		if (fully_overlapped_va) {
+		if (fully_overlapped_va != 0) {

 #if PLAT_XLAT_TABLES_DYNAMIC
-			if ((mm->attr & MT_DYNAMIC) ||
-			    (mm_cursor->attr & MT_DYNAMIC))
+			if (((mm->attr & MT_DYNAMIC) != 0U) ||
+			    ((mm_cursor->attr & MT_DYNAMIC) != 0U))
 				return -EPERM;
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 			if ((mm_cursor->base_va - mm_cursor->base_pa) !=

@@ -674,16 +686,14 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
 			 */
 			unsigned long long mm_cursor_end_pa =
-				     mm_cursor->base_pa + mm_cursor->size - 1;
+				     mm_cursor->base_pa + mm_cursor->size - 1U;

-			int separated_pa =
-				(end_pa < mm_cursor->base_pa) ||
-				(base_pa > mm_cursor_end_pa);
-			int separated_va =
-				(end_va < mm_cursor->base_va) ||
-				(base_va > mm_cursor_end_va);
+			int separated_pa = ((end_pa < mm_cursor->base_pa) ||
+				(base_pa > mm_cursor_end_pa)) ? 1 : 0;
+			int separated_va = ((end_va < mm_cursor->base_va) ||
+				(base_va > mm_cursor_end_va)) ? 1 : 0;

-			if (!(separated_va && separated_pa))
+			if ((separated_va == 0) || (separated_pa == 0))
 				return -EPERM;
 		}
 	}
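separated_pa and separated_va stay plain int flags, so the logical expressions are converted with an explicit ? 1 : 0 rather than relying on the implicit Boolean-to-int result, which keeps the assignment inside MISRA's essential type model. The shape in isolation, with invented range parameters:

    #include <stdint.h>

    /* Two inclusive ranges are separated when one ends before the other
     * begins; the ternary makes the Boolean-to-int conversion explicit. */
    static int ranges_separated(uint64_t a_start, uint64_t a_end,
                                uint64_t b_start, uint64_t b_end)
    {
        return ((a_end < b_start) || (a_start > b_end)) ? 1 : 0;
    }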
@@ -695,17 +705,17 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
 	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
 	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
-	mmap_region_t *mm_last;
-	unsigned long long end_pa = mm->base_pa + mm->size - 1;
-	uintptr_t end_va = mm->base_va + mm->size - 1;
+	const mmap_region_t *mm_last;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+	uintptr_t end_va = mm->base_va + mm->size - 1U;
 	int ret;

 	/* Ignore empty regions */
-	if (!mm->size)
+	if (mm->size == 0U)
 		return;

 	/* Static regions must be added before initializing the xlat tables. */
-	assert(!ctx->initialized);
+	assert(ctx->initialized == 0);

 	ret = mmap_add_region_check(ctx, mm);
 	if (ret != 0) {

@@ -738,13 +748,15 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 	 * Overlapping is only allowed for static regions.
 	 */

-	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
-	       && mm_cursor->size)
+	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+	       && (mm_cursor->size != 0U)) {
 		++mm_cursor;
+	}

-	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
-	       && (mm_cursor->size != 0U) && (mm_cursor->size < mm->size))
+	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
 		++mm_cursor;
+	}

 	/*
 	 * Find the last entry marker in the mmap

@@ -763,7 +775,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 	/* Make room for new region by moving other regions up by one place */
 	mm_destination = mm_cursor + 1;
-	memmove(mm_destination, mm_cursor,
+	(void)memmove(mm_destination, mm_cursor,
 		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

 	/*
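Casting the result of memmove() to (void) follows MISRA C:2012 Rule 17.7: the value returned by a non-void function shall either be used or explicitly discarded. A small sketch of the pattern:

    #include <string.h>

    /* memmove() returns its destination pointer; when that value is not
     * needed, discarding it explicitly documents the intent for reviewers
     * and static analysers alike. */
    static void shift_entries(int *dst, const int *src, size_t n)
    {
        (void)memmove(dst, src, n * sizeof(int));
    }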
@@ -783,9 +795,11 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
-	while (mm->size) {
-		mmap_add_region_ctx(ctx, mm);
-		mm++;
+	const mmap_region_t *mm_cursor = mm;
+
+	while (mm_cursor->size != 0U) {
+		mmap_add_region_ctx(ctx, mm_cursor);
+		mm_cursor++;
 	}
 }
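Walking a local mm_cursor instead of incrementing the mm parameter itself follows MISRA C:2012 Rule 17.8 (a function parameter should not be modified). A before/after sketch with a hypothetical array walker:

    /* Non-compliant: the parameter itself is advanced (Rule 17.8). */
    static int sum_bad(const int *values, int terminator)
    {
        int total = 0;
        while (*values != terminator) {
            total += *values;
            values++;
        }
        return total;
    }

    /* Compliant: copy the parameter into a local cursor first. */
    static int sum_good(const int *values, int terminator)
    {
        const int *cursor = values;
        int total = 0;
        while (*cursor != terminator) {
            total += *cursor;
            cursor++;
        }
        return total;
    }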
@@ -794,13 +808,13 @@ void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 {
 	mmap_region_t *mm_cursor = ctx->mmap;
-	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
-	unsigned long long end_pa = mm->base_pa + mm->size - 1;
-	uintptr_t end_va = mm->base_va + mm->size - 1;
+	const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+	uintptr_t end_va = mm->base_va + mm->size - 1U;
 	int ret;

 	/* Nothing to do */
-	if (!mm->size)
+	if (mm->size == 0U)
 		return 0;

 	/* Now this region is a dynamic one */

@@ -815,16 +829,18 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 	 * static regions in mmap_add_region_ctx().
 	 */

-	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
-	       && mm_cursor->size)
+	while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+	       && (mm_cursor->size != 0U)) {
 		++mm_cursor;
+	}

-	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
-	       && (mm_cursor->size < mm->size))
+	while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+	       (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
 		++mm_cursor;
+	}

 	/* Make room for new region by moving other regions up by one place */
-	memmove(mm_cursor + 1, mm_cursor,
+	(void)memmove(mm_cursor + 1U, mm_cursor,
 		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

 	/*

@@ -832,7 +848,7 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 	 * This shouldn't happen as we have checked in mmap_add_region_check
 	 * that there is free space.
 	 */
-	assert(mm_last->size == 0);
+	assert(mm_last->size == 0U);

 	*mm_cursor = *mm;

@@ -840,14 +856,14 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 	 * Update the translation tables if the xlat tables are initialized. If
 	 * not, this region will be mapped when they are initialized.
 	 */
-	if (ctx->initialized) {
-		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0,
-				ctx->base_table, ctx->base_table_entries,
+	if (ctx->initialized != 0) {
+		end_va = xlat_tables_map_region(ctx, mm_cursor, 0U,
+				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);

 		/* Failed to map, remove mmap entry, unmap and return error. */
-		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
-			memmove(mm_cursor, mm_cursor + 1,
+		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+			(void)memmove(mm_cursor, mm_cursor + 1U,
 				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

 			/*

@@ -862,13 +878,14 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 			 * entries, undo every change done up to this point.
 			 */
 			mmap_region_t unmap_mm = {
-					.base_pa = 0,
+					.base_pa = 0U,
 					.base_va = mm->base_va,
 					.size = end_va - mm->base_va,
-					.attr = 0
+					.attr = 0U
 			};
-			xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
-						 ctx->base_table_entries, ctx->base_level);
+			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+				ctx->base_table, ctx->base_table_entries,
+				ctx->base_level);

 			return -ENOMEM;
 		}

@@ -903,61 +920,61 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
 				   size_t size)
 {
 	mmap_region_t *mm = ctx->mmap;
-	mmap_region_t *mm_last = mm + ctx->mmap_num;
+	const mmap_region_t *mm_last = mm + ctx->mmap_num;
 	int update_max_va_needed = 0;
 	int update_max_pa_needed = 0;

 	/* Check sanity of mmap array. */
-	assert(mm[ctx->mmap_num].size == 0);
+	assert(mm[ctx->mmap_num].size == 0U);

-	while (mm->size) {
+	while (mm->size != 0U) {
 		if ((mm->base_va == base_va) && (mm->size == size))
 			break;
 		++mm;
 	}

 	/* Check that the region was found */
-	if (mm->size == 0)
+	if (mm->size == 0U)
 		return -EINVAL;

 	/* If the region is static it can't be removed */
-	if (!(mm->attr & MT_DYNAMIC))
+	if ((mm->attr & MT_DYNAMIC) == 0U)
 		return -EPERM;

 	/* Check if this region is using the top VAs or PAs. */
-	if ((mm->base_va + mm->size - 1) == ctx->max_va)
+	if ((mm->base_va + mm->size - 1U) == ctx->max_va)
 		update_max_va_needed = 1;
-	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
+	if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
 		update_max_pa_needed = 1;

 	/* Update the translation tables if needed */
-	if (ctx->initialized) {
-		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
+	if (ctx->initialized != 0) {
+		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
 					 ctx->base_table_entries,
 					 ctx->base_level);
 		xlat_arch_tlbi_va_sync();
 	}

 	/* Remove this region by moving the rest down by one place. */
-	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
+	(void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);

 	/* Check if we need to update the max VAs and PAs */
-	if (update_max_va_needed) {
-		ctx->max_va = 0;
+	if (update_max_va_needed == 1) {
+		ctx->max_va = 0U;
 		mm = ctx->mmap;
-		while (mm->size) {
-			if ((mm->base_va + mm->size - 1) > ctx->max_va)
-				ctx->max_va = mm->base_va + mm->size - 1;
+		while (mm->size != 0U) {
+			if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+				ctx->max_va = mm->base_va + mm->size - 1U;
 			++mm;
 		}
 	}

-	if (update_max_pa_needed) {
-		ctx->max_pa = 0;
+	if (update_max_pa_needed == 1) {
+		ctx->max_pa = 0U;
 		mm = ctx->mmap;
-		while (mm->size) {
-			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
-				ctx->max_pa = mm->base_pa + mm->size - 1;
+		while (mm->size != 0U) {
+			if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+				ctx->max_pa = mm->base_pa + mm->size - 1U;
 			++mm;
 		}
 	}

@@ -970,9 +987,10 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
 void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 {
 	assert(ctx != NULL);
-	assert(!ctx->initialized);
-	assert(ctx->xlat_regime == EL3_REGIME ||
-	       ctx->xlat_regime == EL1_EL0_REGIME);
-	assert(!is_mmu_enabled_ctx(ctx));
+	assert(ctx->initialized == 0);
+	assert((ctx->xlat_regime == EL3_REGIME) ||
+	       (ctx->xlat_regime == EL1_EL0_REGIME));
+	assert(is_mmu_enabled_ctx(ctx) == 0);

 	mmap_region_t *mm = ctx->mmap;

@@ -980,25 +998,26 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 	/* All tables must be zeroed before mapping any region. */

-	for (unsigned int i = 0; i < ctx->base_table_entries; i++)
+	for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
 		ctx->base_table[i] = INVALID_DESC;

-	for (unsigned int j = 0; j < ctx->tables_num; j++) {
+	for (int j = 0; j < ctx->tables_num; j++) {
 #if PLAT_XLAT_TABLES_DYNAMIC
 		ctx->tables_mapped_regions[j] = 0;
 #endif
-		for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
+		for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
 			ctx->tables[j][i] = INVALID_DESC;
 	}

-	while (mm->size) {
-		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0,
+	while (mm->size != 0U) {
+		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);

-		if (end_va != mm->base_va + mm->size - 1) {
+		if (end_va != (mm->base_va + mm->size - 1U)) {
 			ERROR("Not enough memory to map region:\n"
-			      " VA:%p  PA:0x%llx  size:0x%zx  attr:0x%x\n",
-			      (void *)mm->base_va, mm->base_pa, mm->size,
-			      mm->attr);
+			      " VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x\n",
+			      mm->base_va, mm->base_pa, mm->size, mm->attr);
 			panic();
 		}
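A pattern worth noting before the next file: throughout the commit, addresses held in uintptr_t are now printed with 0x%lx instead of being cast to void * for %p, which removes a pointer-to-arithmetic conversion of the kind MISRA restricts (Rule 11.6). This relies on uintptr_t being unsigned long on these builds, which is an assumption outside this diff; a hedged sketch that stays portable by casting explicitly:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumes uintptr_t fits in unsigned long, as in this codebase;
     * elsewhere, PRIxPTR from <inttypes.h> would be the portable format. */
    static void print_va(uintptr_t va)
    {
        printf("VA: 0x%lx\n", (unsigned long)va);
    }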
lib/xlat_tables_v2/xlat_tables_private.h View file @ 2ee596c4
@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H

 #include <platform_def.h>
 #include <xlat_tables_defs.h>
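The guard rename is required because identifiers that begin with an underscore followed by an uppercase letter, or that contain a double underscore, are reserved for the implementation (C99 7.1.3, surfaced through MISRA C:2012 Rule 21.1). The same fix applied to a hypothetical header:

    /* Non-compliant: __MY_DRIVER_H__ is a reserved identifier. */
    /* Compliant guard for a hypothetical my_driver.h: */
    #ifndef MY_DRIVER_H
    #define MY_DRIVER_H

    /* ... declarations ... */

    #endif /* MY_DRIVER_H */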
@@ -35,6 +35,8 @@
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */

+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Return the execute-never mask that will prevent instruction fetch at the
  * given translation regime.

@@ -61,7 +63,7 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
 void xlat_arch_tlbi_va_sync(void);

 /* Print VA, PA, size and attributes of all regions in the mmap array. */
-void xlat_mmap_print(mmap_region_t *const mmap);
+void xlat_mmap_print(const mmap_region_t *mmap);

 /*
  * Print the current state of the translation tables by reading them from

@@ -73,14 +75,14 @@ void xlat_tables_print(xlat_ctx_t *ctx);
  * Returns a block/page table descriptor for the given level and attributes.
  */
 uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
-		   unsigned long long addr_pa, int level);
+		   unsigned long long addr_pa, unsigned int level);

 /*
  * Architecture-specific initialization code.
  */

 /* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);

 /*
  * Return the maximum physical address supported by the hardware.

@@ -94,4 +96,4 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
  */
 int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);

-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
lib/xlat_tables_v2/xlat_tables_utils.c View file @ 2ee596c4
@@ -18,7 +18,7 @@

 #if LOG_LEVEL < LOG_LEVEL_VERBOSE

-void xlat_mmap_print(__unused mmap_region_t *const mmap)
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
 {
 	/* Empty */
 }

@@ -30,7 +30,7 @@ void xlat_tables_print(__unused xlat_ctx_t *ctx)

 #else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

-void xlat_mmap_print(mmap_region_t *const mmap)
+void xlat_mmap_print(const mmap_region_t *mmap)
 {
 	tf_printf("mmap:\n");
 	const mmap_region_t *mm = mmap;

@@ -47,7 +47,7 @@ void xlat_mmap_print(mmap_region_t *const mmap)
 /* Print the attributes of the specified block descriptor. */
 static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
 {
-	int mem_type_index = ATTR_INDEX_GET(desc);
+	uint64_t mem_type_index = ATTR_INDEX_GET(desc);
 	int xlat_regime = ctx->xlat_regime;

 	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
@@ -61,8 +61,8 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
 	if (xlat_regime == EL3_REGIME) {
 		/* For EL3 only check the AP[2] and XN bits. */
-		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
-		tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
+		tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+		tf_printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
 	} else {
 		assert(xlat_regime == EL1_EL0_REGIME);
 		/*
@@ -80,18 +80,18 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
 		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
 #endif
-		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
+		tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
 		/* Only check one of PXN and UXN, the other one is the same. */
-		tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
+		tf_printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
 		/*
 		 * Privileged regions can only be accessed from EL1, user
 		 * regions can be accessed from EL1 and EL0.
 		 */
-		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+		tf_printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
 			  ? "-USER" : "-PRIV");
 	}

-	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+	tf_printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
 }

 static const char *const level_spacers[] = {
@@ -108,17 +108,15 @@ static const char *invalid_descriptors_ommited =
  * Recursive function that reads the translation tables passed as an argument
  * and prints their status.
  */
-static void xlat_tables_print_internal(xlat_ctx_t *ctx,
-		const uintptr_t table_base_va,
-		uint64_t *const table_base, const int table_entries,
-		const unsigned int level)
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+		const uint64_t *table_base, unsigned int table_entries,
+		unsigned int level)
 {
 	assert(level <= XLAT_TABLE_LEVEL_MAX);

 	uint64_t desc;
 	uintptr_t table_idx_va = table_base_va;
-	int table_idx = 0;
+	unsigned int table_idx = 0U;

 	size_t level_size = XLAT_BLOCK_SIZE(level);

 	/*
@@ -136,9 +134,9 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
 		if ((desc & DESC_MASK) == INVALID_DESC) {

 			if (invalid_row_count == 0) {
-				tf_printf("%sVA:%p size:0x%zx\n",
+				tf_printf("%sVA:0x%lx size:0x%zx\n",
 					  level_spacers[level],
-					  (void *)table_idx_va, level_size);
+					  table_idx_va, level_size);
 			}
 			invalid_row_count++;
@@ -164,20 +162,20 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
 				 * but instead points to the next translation
 				 * table in the translation table walk.
 				 */
-				tf_printf("%sVA:%p size:0x%zx\n",
+				tf_printf("%sVA:0x%lx size:0x%zx\n",
 					  level_spacers[level],
-					  (void *)table_idx_va, level_size);
+					  table_idx_va, level_size);

 				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

 				xlat_tables_print_internal(ctx, table_idx_va,
 					(uint64_t *)addr_inner,
-					XLAT_TABLE_ENTRIES, level + 1);
+					XLAT_TABLE_ENTRIES, level + 1U);
 			} else {
-				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+				tf_printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
 					  level_spacers[level],
-					  (void *)table_idx_va,
-					  (unsigned long long)(desc & TABLE_ADDR_MASK),
+					  table_idx_va,
+					  (uint64_t)(desc & TABLE_ADDR_MASK),
 					  level_size);
 				xlat_desc_print(ctx, desc);
 				tf_printf("\n");
@@ -197,6 +195,8 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
 void xlat_tables_print(xlat_ctx_t *ctx)
 {
 	const char *xlat_regime_str;
+	int used_page_tables;
+
 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
 		xlat_regime_str = "1&0";
 	} else {
@@ -206,29 +206,28 @@ void xlat_tables_print(xlat_ctx_t *ctx)
 	VERBOSE("Translation tables state:\n");
 	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
 	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
-	VERBOSE("  Max allowed VA:  %p\n", (void *)ctx->va_max_address);
+	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
 	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
-	VERBOSE("  Max mapped VA:   %p\n", (void *)ctx->max_va);
+	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);

-	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
-	VERBOSE("  Entries @initial lookup level: %i\n",
+	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
+	VERBOSE("  Entries @initial lookup level: %u\n",
 		ctx->base_table_entries);

-	int used_page_tables;
 #if PLAT_XLAT_TABLES_DYNAMIC
 	used_page_tables = 0;
-	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+	for (int i = 0; i < ctx->tables_num; ++i) {
 		if (ctx->tables_mapped_regions[i] != 0)
 			++used_page_tables;
 	}
 #else
 	used_page_tables = ctx->next_table;
 #endif
-	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
+	VERBOSE("  Used %d sub-tables out of %d (spare: %d)\n",
 		used_page_tables, ctx->tables_num,
 		ctx->tables_num - used_page_tables);

-	xlat_tables_print_internal(ctx, 0, ctx->base_table,
+	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
 				   ctx->base_table_entries, ctx->base_level);
 }
@@ -251,13 +250,13 @@ void xlat_tables_print(xlat_ctx_t *ctx)
  */
 static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
 				       void *xlat_table_base,
-				       int xlat_table_base_entries,
+				       unsigned int xlat_table_base_entries,
 				       unsigned long long virt_addr_space_size,
-				       int *out_level)
+				       unsigned int *out_level)
 {
 	unsigned int start_level;
 	uint64_t *table;
-	int entries;
+	unsigned int entries;

 	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
@@ -267,9 +266,7 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
 	for (unsigned int level = start_level;
 	     level <= XLAT_TABLE_LEVEL_MAX;
 	     ++level) {
-		int idx;
-		uint64_t desc;
-		uint64_t desc_type;
+		uint64_t idx, desc, desc_type;

 		idx = XLAT_TABLE_IDX(virtual_addr, level);
 		if (idx >= entries) {
@@ -318,22 +315,23 @@
 static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 					uint32_t *attributes,
 					uint64_t **table_entry,
-					unsigned long long *addr_pa,
-					int *table_level)
+					unsigned long long *addr_pa,
+					unsigned int *table_level)
 {
 	uint64_t *entry;
 	uint64_t desc;
-	int level;
+	unsigned int level;
 	unsigned long long virt_addr_space_size;

 	/*
 	 * Sanity-check arguments.
 	 */
 	assert(ctx != NULL);
-	assert(ctx->initialized);
-	assert(ctx->xlat_regime == EL1_EL0_REGIME ||
-	       ctx->xlat_regime == EL3_REGIME);
+	assert(ctx->initialized != 0);
+	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+	       (ctx->xlat_regime == EL3_REGIME));

-	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
-	assert(virt_addr_space_size > 0);
+	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+	assert(virt_addr_space_size > 0U);

 	entry = find_xlat_table_entry(base_va,
 				ctx->base_table,
@@ -341,7 +339,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 				virt_addr_space_size, &level);
 	if (entry == NULL) {
-		WARN("Address %p is not mapped.\n", (void *)base_va);
+		WARN("Address 0x%lx is not mapped.\n", base_va);
 		return -EINVAL;
 	}
@@ -366,9 +364,9 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

 	assert(attributes != NULL);
-	*attributes = 0;
+	*attributes = 0U;

-	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+	uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

 	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
 		*attributes |= MT_MEMORY;
@@ -379,20 +377,21 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 		*attributes |= MT_DEVICE;
 	}

-	int ap2_bit = (desc >> AP2_SHIFT) & 1;
+	uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;

 	if (ap2_bit == AP2_RW)
 		*attributes |= MT_RW;

 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
-		int ap1_bit = (desc >> AP1_SHIFT) & 1;
+		uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
 		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
 			*attributes |= MT_USER;
 	}

-	int ns_bit = (desc >> NS_SHIFT) & 1;
+	uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;

-	if (ns_bit == 1)
+	if (ns_bit == 1U)
 		*attributes |= MT_NS;

 	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
@@ -400,7 +399,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 	if ((desc & xn_mask) == xn_mask) {
 		*attributes |= MT_EXECUTE_NEVER;
 	} else {
-		assert((desc & xn_mask) == 0);
+		assert((desc & xn_mask) == 0U);
 	}

 	return 0;
@@ -415,7 +414,7 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
 }

-int change_mem_attributes(xlat_ctx_t *ctx,
+int change_mem_attributes(const xlat_ctx_t *ctx,
 			uintptr_t base_va,
 			size_t size,
 			uint32_t attr)
@@ -423,49 +422,49 @@ int change_mem_attributes(xlat_ctx_t *ctx,
 	/* Note: This implementation isn't optimized. */

 	assert(ctx != NULL);
-	assert(ctx->initialized);
+	assert(ctx->initialized != 0);

 	unsigned long long virt_addr_space_size =
-		(unsigned long long)ctx->va_max_address + 1;
-	assert(virt_addr_space_size > 0);
+		(unsigned long long)ctx->va_max_address + 1U;
+	assert(virt_addr_space_size > 0U);

 	if (!IS_PAGE_ALIGNED(base_va)) {
-		WARN("%s: Address %p is not aligned on a page boundary.\n",
-		     __func__, (void *)base_va);
+		WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+		     __func__, base_va);
 		return -EINVAL;
 	}

-	if (size == 0) {
+	if (size == 0U) {
 		WARN("%s: Size is 0.\n", __func__);
 		return -EINVAL;
 	}

-	if ((size % PAGE_SIZE) != 0) {
+	if ((size % PAGE_SIZE) != 0U) {
 		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
 		     __func__, size);
 		return -EINVAL;
 	}

-	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+	if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
 		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
 		     __func__);
 		return -EINVAL;
 	}

-	int pages_count = size / PAGE_SIZE;
+	size_t pages_count = size / PAGE_SIZE;

-	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
-		pages_count, (void *)base_va);
+	VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+		pages_count, base_va);

 	uintptr_t base_va_original = base_va;

 	/*
 	 * Sanity checks.
 	 */
-	for (int i = 0; i < pages_count; ++i) {
-		uint64_t *entry;
-		uint64_t desc;
-		int level;
+	for (size_t i = 0U; i < pages_count; ++i) {
+		const uint64_t *entry;
+		uint64_t desc, attr_index;
+		unsigned int level;

 		entry = find_xlat_table_entry(base_va,
 					      ctx->base_table,
@@ -473,7 +472,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
 					      virt_addr_space_size, &level);
 		if (entry == NULL) {
-			WARN("Address %p is not mapped.\n", (void *)base_va);
+			WARN("Address 0x%lx is not mapped.\n", base_va);
 			return -EINVAL;
 		}
@@ -485,8 +484,8 @@ int change_mem_attributes(xlat_ctx_t *ctx,
 		if (((desc & DESC_MASK) != PAGE_DESC) ||
 		    (level != XLAT_TABLE_LEVEL_MAX)) {
-			WARN("Address %p is not mapped at the right granularity.\n",
-			     (void *)base_va);
+			WARN("Address 0x%lx is not mapped at the right granularity.\n",
+			     base_va);
 			WARN("Granularity is 0x%llx, should be 0x%x.\n",
 			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
 			return -EINVAL;
@@ -495,11 +494,11 @@ int change_mem_attributes(xlat_ctx_t *ctx,
 		/*
 		 * If the region type is device, it shouldn't be executable.
 		 */
-		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+		attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
 		if (attr_index == ATTR_DEVICE_INDEX) {
-			if ((attr & MT_EXECUTE_NEVER) == 0) {
-				WARN("Setting device memory as executable at address %p.",
-				     (void *)base_va);
+			if ((attr & MT_EXECUTE_NEVER) == 0U) {
+				WARN("Setting device memory as executable at address 0x%lx.",
+				     base_va);
 				return -EINVAL;
 			}
 		}
@@ -510,14 +509,14 @@ int change_mem_attributes(xlat_ctx_t *ctx,
 	/* Restore original value. */
 	base_va = base_va_original;

-	for (int i = 0; i < pages_count; ++i) {
+	for (unsigned int i = 0U; i < pages_count; ++i) {

-		uint32_t old_attr, new_attr;
-		uint64_t *entry;
-		int level;
-		unsigned long long addr_pa;
+		uint32_t old_attr = 0U, new_attr;
+		uint64_t *entry = NULL;
+		unsigned int level = 0U;
+		unsigned long long addr_pa = 0ULL;

-		get_mem_attributes_internal(ctx, base_va, &old_attr,
+		(void)get_mem_attributes_internal(ctx, base_va, &old_attr,
 					    &entry, &addr_pa, &level);

 		/*
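This last hunk initialises old_attr, entry, level and addr_pa before their addresses are passed down, so no path can read an indeterminate value (MISRA C:2012 Rule 9.1), and it discards the function result with a (void) cast per Rule 17.7. The same defensive shape in a small sketch with a hypothetical query function:

    #include <stdint.h>

    /* Hypothetical query that may fill its outputs only on success. */
    extern int query_attr(uintptr_t va, uint32_t *attr, unsigned int *level);

    static void inspect(uintptr_t va)
    {
        uint32_t attr = 0U;      /* defined values even if query_attr() */
        unsigned int level = 0U; /* fails to write them (Rule 9.1)      */

        (void)query_attr(va, &attr, &level); /* result unused: Rule 17.7 */
    }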