Arm Trusted Firmware
Unverified commit 700b6da7, authored Jul 03, 2018 by Dimitris Papastamos, committed by GitHub on Jul 03, 2018
Merge pull request #1459 from antonio-nino-diaz-arm/an/xlat-refactor
Refactor of the xlat tables v2 library
Parents: c2f27ced, 6a086061
Changes: 11 files
include/lib/xlat_tables/xlat_tables_v2.h
@@ -123,10 +123,8 @@ typedef struct mmap_region {
/*
* Translation regimes supported by this library.
*/
-typedef enum xlat_regime {
-	EL1_EL0_REGIME,
-	EL3_REGIME,
-} xlat_regime_t;
+#define EL1_EL0_REGIME	1
+#define EL3_REGIME	3
/*
* Declare the translation context type.
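Since the regimes are now plain integer defines, code that previously took an xlat_regime_t argument simply takes an int (see the signature changes further down in this diff). A minimal sketch, assuming a caller inside the library that wants to drop one VA from the EL3 regime's TLB entries; both functions and EL3_REGIME appear elsewhere in this commit, while the helper itself is hypothetical:

	#include <xlat_tables_v2.h>
	#include "xlat_tables_private.h"

	/* Hypothetical helper: invalidate one VA for the EL3 translation regime. */
	static void flush_el3_va(uintptr_t va)
	{
		xlat_arch_tlbi_va_regime(va, EL3_REGIME);
		xlat_arch_tlbi_va_sync();	/* wait for the invalidation to complete */
	}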
@@ -163,10 +161,10 @@ typedef struct xlat_ctx xlat_ctx_t;
*/
#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size) \
-	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
-					 _xlat_tables_count,		\
-					 _virt_addr_space_size,		\
-					 _phy_addr_space_size,		\
+	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+					 (_xlat_tables_count),		\
+					 (_virt_addr_space_size),	\
+					 (_phy_addr_space_size),	\
 					 IMAGE_XLAT_DEFAULT_REGIME,	\
 					 "xlat_table")
@@ -175,7 +173,7 @@ typedef struct xlat_ctx xlat_ctx_t;
*
* _xlat_regime:
* Specify the translation regime managed by this xlat_ctx_t instance. The
- * values are the one from xlat_regime_t enumeration.
+ * values are the one from the EL*_REGIME definitions.
*
* _section_name:
* Specify the name of the section where the translation tables have to be
@@ -184,11 +182,11 @@ typedef struct xlat_ctx xlat_ctx_t;
#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size, \
_xlat_regime, _section_name) \
-	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,	\
-					 _xlat_tables_count,		\
-					 _virt_addr_space_size,		\
-					 _phy_addr_space_size,		\
-					 _xlat_regime, _section_name)
+	_REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count),	\
+					 (_xlat_tables_count),		\
+					 (_virt_addr_space_size),	\
+					 (_phy_addr_space_size),	\
+					 (_xlat_regime), (_section_name))
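As a usage illustration, a hedged sketch of registering a second context with this macro: an EL1&0 regime context whose tables are placed in a dedicated linker section. The macro, its argument order and EL1_EL0_REGIME come from this header; the context name, counts, 4 GiB sizes and section name are invented for the example:

	#include <xlat_tables_v2.h>

	/* Illustrative values only; a real client sizes these from its platform needs. */
	REGISTER_XLAT_CONTEXT2(sp,			/* _ctx_name (hypothetical) */
			       8,			/* _mmap_count */
			       4,			/* _xlat_tables_count */
			       (1ULL << 32),		/* _virt_addr_space_size */
			       (1ULL << 32),		/* _phy_addr_space_size */
			       EL1_EL0_REGIME,		/* _xlat_regime */
			       "sp_xlat_table");	/* _section_name (hypothetical) */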
/******************************************************************************
* Generic translation table APIs.
include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -109,10 +109,8 @@ struct xlat_ctx {
 	unsigned int initialized;

 	/*
-	 * Translation regime managed by this xlat_ctx_t. It takes the values of
-	 * the enumeration xlat_regime_t. The type is "int" to avoid a circular
-	 * dependency on xlat_tables_v2.h, but this member must be treated as
-	 * xlat_regime_t.
+	 * Translation regime managed by this xlat_ctx_t. It should be one of
+	 * the EL*_REGIME defines.
 	 */
 	int xlat_regime;
};
@@ -157,7 +155,7 @@ struct xlat_ctx {
.va_max_address = (_virt_addr_space_size) - 1, \
.pa_max_address = (_phy_addr_space_size) - 1, \
.mmap = _ctx_name##_mmap, \
-		.mmap_num = _mmap_count,				\
+		.mmap_num = (_mmap_count),				\
.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size), \
.base_table = _ctx_name##_base_xlat_table, \
.base_table_entries = \
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -61,7 +61,7 @@ void xlat_arch_tlbi_va(uintptr_t va)
 	tlbimvaais(TLBI_ADDR(va));
 }

-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
{
/*
* Ensure the translation table write has drained into memory before
lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
@@ -14,7 +14,7 @@
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
*/
-static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
{
 	return UPPER_ATTRS(XN);
}
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -7,11 +7,8 @@
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <sys/types.h>
#include <utils.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
@@ -128,7 +125,7 @@ void xlat_arch_tlbi_va(uintptr_t va)
#endif
}
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
@@ -15,7 +15,7 @@
* Return the execute-never mask that will prevent instruction fetch at all ELs
* that are part of the given translation regime.
*/
-static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
{
 	if (regime == EL1_EL0_REGIME) {
 		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
lib/xlat_tables_v2/xlat_tables.mk
@@ -7,6 +7,8 @@
 XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
 				${ARCH}/enable_mmu.S			\
 				${ARCH}/xlat_tables_arch.c		\
-				xlat_tables_internal.c)
+				xlat_tables_context.c			\
+				xlat_tables_core.c			\
+				xlat_tables_utils.c)

 INCLUDES		+=	-Ilib/xlat_tables_v2/${ARCH}
lib/xlat_tables_v2/xlat_tables_context.c  (new file, mode 100644)
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <debug.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
#include "xlat_tables_private.h"
/*
* Each platform can define the size of its physical and virtual address spaces.
* If the platform hasn't defined one or both of them, default to
* ADDR_SPACE_SIZE. The latter is deprecated, though.
*/
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
#endif
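In other words, each platform is expected to provide the two PLAT_*_ADDR_SPACE_SIZE macros from its platform_def.h. A minimal sketch of such a definition, with purely illustrative 4 GiB sizes (not taken from any real platform):

	/* platform_def.h (sketch): 4 GiB physical and virtual address spaces. */
	#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
	#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)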
/*
* Allocate and initialise the default translation context for the BL image
* currently executing.
*/
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
		     unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
}

void mmap_add(const mmap_region_t *mm)
{
	mmap_add_ctx(&tf_xlat_ctx, mm);
}
#if PLAT_XLAT_TABLES_DYNAMIC
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			    size_t size, unsigned int attr)
{
	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);

	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
void init_xlat_tables(void)
{
	init_xlat_tables_ctx(&tf_xlat_ctx);
}
/*
* If dynamic allocation of new regions is disabled then by the time we call the
* function enabling the MMU, we'll have registered all the memory regions to
* map for the system's lifetime. Therefore, at this point we know the maximum
* physical address that will ever be mapped.
*
* If dynamic allocation is enabled then we can't make any such assumption
* because the maximum physical address could get pushed while adding a new
* region. Therefore, in this case we have to assume that the whole address
* space size might be mapped.
*/
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
#endif
#ifdef AARCH32

void enable_mmu_secure(unsigned int flags)
{
	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address);
	enable_mmu_direct(flags);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el3(unsigned int flags)
{
	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
		      tf_xlat_ctx.va_max_address);
	enable_mmu_direct_el3(flags);
}

#endif /* AARCH32 */
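Taken together, this file keeps the familiar single-context API: a BL image adds its regions, builds the tables and then enables the MMU, all against the default 'tf' context registered above. A hedged sketch of that boot-time sequence for an AArch64 EL3 image; the library functions and attribute flags exist in the library, while the setup function, addresses, sizes and attributes chosen here are placeholders:

	#include <xlat_tables_v2.h>

	/* Hypothetical BL setup code using the default context. */
	static void bl_setup_mmu(void)
	{
		/* Placeholder regions: 1 MiB of code and 1 MiB of read-write data. */
		mmap_add_region(0x80000000ULL, 0x80000000UL, 0x100000U,
				MT_CODE | MT_SECURE);
		mmap_add_region(0x80100000ULL, 0x80100000UL, 0x100000U,
				MT_RW_DATA | MT_SECURE);

		init_xlat_tables();	/* build the translation tables */
		enable_mmu_el3(0);	/* configure and enable the MMU at EL3 */
	}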
lib/xlat_tables_v2/xlat_tables_internal.c → lib/xlat_tables_v2/xlat_tables_core.c  (renamed; diff collapsed)
lib/xlat_tables_v2/xlat_tables_private.h
@@ -50,7 +50,7 @@
* S-EL1.
*/
 void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
/*
* This function has to be called at the end of any code that uses the function
@@ -59,7 +59,7 @@ void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
 void xlat_arch_tlbi_va_sync(void);

 /* Print VA, PA, size and attributes of all regions in the mmap array. */
-void print_mmap(mmap_region_t *const mmap);
+void xlat_mmap_print(mmap_region_t *const mmap);
/*
* Print the current state of the translation tables by reading them from
@@ -67,6 +67,12 @@ void print_mmap(mmap_region_t *const mmap);
*/
 void xlat_tables_print(xlat_ctx_t *ctx);

+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+		   unsigned long long addr_pa, int level);
/*
* Architecture-specific initialization code.
*/
lib/xlat_tables_v2/xlat_tables_utils.c  (new file, mode 100644)
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <types.h>
#include <utils_def.h>
#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
#include "xlat_tables_private.h"
#if LOG_LEVEL < LOG_LEVEL_VERBOSE
void xlat_mmap_print(__unused mmap_region_t *const mmap)
{
	/* Empty */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty */
}
#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
void xlat_mmap_print(mmap_region_t *const mmap)
{
	tf_printf("mmap:\n");
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		tf_printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x "
			  "granularity:0x%zx\n", mm->base_va, mm->base_pa,
			  mm->size, mm->attr, mm->granularity);
		++mm;
	};

	tf_printf("\n");
}
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
	int mem_type_index = ATTR_INDEX_GET(desc);
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	if (xlat_regime == EL3_REGIME) {
		/* For EL3 only check the AP[2] and XN bits. */
		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
		tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
	} else {
		assert(xlat_regime == EL1_EL0_REGIME);
/*
* For EL0 and EL1:
* - In AArch64 PXN and UXN can be set independently but in
* AArch32 there is no UXN (XN affects both privilege levels).
* For consistency, we set them simultaneously in both cases.
* - RO and RW permissions must be the same in EL1 and EL0. If
* EL0 can access that memory region, so can EL1, with the
* same permissions.
*/
#if ENABLE_ASSERTIONS
		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
		uint64_t xn_perm = desc & xn_mask;

		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
/* Only check one of PXN and UXN, the other one is the same. */
		tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
/*
* Privileged regions can only be accessed from EL1, user
* regions can be accessed from EL1 and EL0.
*/
		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
			  ? "-USER" : "-PRIV");
	}

	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
}
static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_ommited =
		"%s(%d invalid descriptors omitted)\n";
/*
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
static void xlat_tables_print_internal(xlat_ctx_t *ctx,
		const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const unsigned int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);
/*
* Keep track of how many invalid descriptors are counted in a row.
* Whenever multiple invalid descriptors are found, only the first one
* is printed, and a line is added to inform about how many descriptors
* have been omitted.
*/
	int invalid_row_count = 0;
	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_ommited,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;
/*
* Check if this is a table or a block. Tables are only
* allowed in levels other than 3, but DESC_PAGE has the
* same value as DESC_TABLE, so we need to check.
*/
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
/*
* Do not print any PA for a table descriptor,
* as it doesn't directly map physical memory
* but instead points to the next translation
* table in the translation table walk.
*/
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(ctx, table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)
					  (desc & TABLE_ADDR_MASK), level_size);
				xlat_desc_print(ctx, desc);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}
	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_ommited,
			  level_spacers[level], invalid_row_count - 1);
	}
}
void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		xlat_regime_str = "1&0";
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
	}
	VERBOSE("Translation tables state:\n");
	VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
	VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
	VERBOSE(" Max allowed VA: %p\n", (void *)ctx->va_max_address);
	VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
	VERBOSE(" Max mapped VA: %p\n", (void *)ctx->max_va);

	VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
	VERBOSE(" Entries @initial lookup level: %i\n",
		ctx->base_table_entries);

	int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
/*
* Do a translation table walk to find the block or page descriptor that maps
* virtual_addr.
*
* On success, return the address of the descriptor within the translation
* table. Its lookup level is stored in '*out_level'.
* On error, return NULL.
*
* xlat_table_base
* Base address for the initial lookup level.
* xlat_table_base_entries
* Number of entries in the translation table for the initial lookup level.
* virt_addr_space_size
* Size in bytes of the virtual address space.
*/
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
				       int xlat_table_base_entries,
				       unsigned long long virt_addr_space_size,
				       int *out_level)
{
	unsigned int start_level;
	uint64_t *table;
	int entries;

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);

	table = xlat_table_base;
	entries = xlat_table_base_entries;
	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {

		int idx;
		uint64_t desc;
		uint64_t desc_type;

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
/*
* Only page descriptors allowed at the final lookup
* level.
*/
			assert(desc_type == PAGE_DESC);

			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			*out_level = level;
			return &table[idx];
		}

		assert(desc_type == TABLE_DESC);
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}
/*
* This shouldn't be reached, the translation table walk should end at
* most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
*/
	assert(0);
	return NULL;
}
static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
		uint32_t *attributes, uint64_t **table_entry,
		unsigned long long *addr_pa, int *table_level)
{
	uint64_t *entry;
	uint64_t desc;
	int level;
	unsigned long long virt_addr_space_size;
/*
* Sanity-check arguments.
*/
	assert(ctx != NULL);
	assert(ctx->initialized);
	assert(ctx->xlat_regime == EL1_EL0_REGIME ||
	       ctx->xlat_regime == EL3_REGIME);

	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
	assert(virt_addr_space_size > 0);

	entry = find_xlat_table_entry(base_va,
				      ctx->base_table,
				      ctx->base_table_entries,
				      virt_addr_space_size,
				      &level);
	if (entry == NULL) {
		WARN("Address %p is not mapped.\n", (void *)base_va);
		return -EINVAL;
	}
	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
	tf_printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
	assert(attributes != NULL);
	*attributes = 0;

	int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}
	int ap2_bit = (desc >> AP2_SHIFT) & 1;

	if (ap2_bit == AP2_RW)
		*attributes |= MT_RW;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		int ap1_bit = (desc >> AP1_SHIFT) & 1;
		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
			*attributes |= MT_USER;
	}
	int ns_bit = (desc >> NS_SHIFT) & 1;

	if (ns_bit == 1)
		*attributes |= MT_NS;

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
		assert((desc & xn_mask) == 0);
	}

	return 0;
}
int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
		       uint32_t *attributes)
{
	return get_mem_attributes_internal(ctx, base_va, attributes,
					   NULL, NULL, NULL);
}
int change_mem_attributes(xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
			  uint32_t attr)
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
	assert(ctx->initialized);

	unsigned long long virt_addr_space_size =
		(unsigned long long)ctx->va_max_address + 1;
	assert(virt_addr_space_size > 0);
	if (!IS_PAGE_ALIGNED(base_va)) {
		WARN("%s: Address %p is not aligned on a page boundary.\n",
		     __func__, (void *)base_va);
		return -EINVAL;
	}
	if (size == 0) {
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}
	if ((size % PAGE_SIZE) != 0) {
		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
		     __func__, size);
		return -EINVAL;
	}
	if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
		     __func__);
		return -EINVAL;
	}
	int pages_count = size / PAGE_SIZE;

	VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
		pages_count, (void *)base_va);

	uintptr_t base_va_original = base_va;
/*
* Sanity checks.
*/
	for (int i = 0; i < pages_count; ++i) {

		uint64_t *entry;
		uint64_t desc;
		int level;

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
			WARN("Address %p is not mapped.\n", (void *)base_va);
			return -EINVAL;
		}

		desc = *entry;
/*
* Check that all the required pages are mapped at page
* granularity.
*/
		if (((desc & DESC_MASK) != PAGE_DESC) ||
				(level != XLAT_TABLE_LEVEL_MAX)) {
			WARN("Address %p is not mapped at the right granularity.\n",
			     (void *)base_va);
			WARN("Granularity is 0x%llx, should be 0x%x.\n",
			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
			return -EINVAL;
		}
/*
* If the region type is device, it shouldn't be executable.
*/
		int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
		if (attr_index == ATTR_DEVICE_INDEX) {
			if ((attr & MT_EXECUTE_NEVER) == 0) {
				WARN("Setting device memory as executable at address %p.",
				     (void *)base_va);
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}
/* Restore original value. */
	base_va = base_va_original;

	for (int i = 0; i < pages_count; ++i) {

		uint32_t old_attr, new_attr;
		uint64_t *entry;
		int level;
		unsigned long long addr_pa;

		get_mem_attributes_internal(ctx, base_va, &old_attr,
					    &entry, &addr_pa, &level);
/*
* From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
* MT_USER/MT_PRIVILEGED are taken into account. Any other
* information is ignored.
*/
/* Clean the old attributes so that they can be rebuilt. */
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);
/*
* Update attributes, but filter out the ones this function
* isn't allowed to change.
*/
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);
/*
* The break-before-make sequence requires writing an invalid
* descriptor and making sure that the system sees the change
* before writing the new descriptor.
*/
		*entry = INVALID_DESC;
/* Invalidate any cached copy of this mapping in the TLBs. */
		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();
/* Write new descriptor */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);

		base_va += PAGE_SIZE;
	}
/* Ensure that the last descriptor writen is seen by the system. */
	dsbish();

	return 0;
}
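As a usage illustration, a hedged sketch that combines the two public helpers defined in this file to make one already-mapped page read-only and non-executable. The function names and signatures are the ones above; seal_page itself and the idea of passing in a context pointer are hypothetical:

	#include <errno.h>
	#include <xlat_tables_v2.h>

	/* Hypothetical helper: seal one 4 KiB page of 'ctx' as read-only and XN. */
	static int seal_page(xlat_ctx_t *ctx, uintptr_t va)
	{
		uint32_t attr;
		int ret = get_mem_attributes(ctx, va, &attr);	/* check it is mapped */

		if (ret != 0)
			return ret;

		/*
		 * Only the RW/RO, EXECUTE/EXECUTE_NEVER and USER/PRIVILEGED bits
		 * of the requested attributes are honoured; the memory type and
		 * security state of the existing mapping are preserved.
		 */
		return change_mem_attributes(ctx, va, PAGE_SIZE,
					     MT_RO | MT_EXECUTE_NEVER);
	}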