adam.huang / Arm Trusted Firmware / Commits / 224e1aba

Unverified commit 224e1aba, authored Jul 19, 2018 by danh-arm, committed by GitHub on Jul 19, 2018
Merge pull request #1481 from antonio-nino-diaz-arm/an/xlat-refactor

xlat: More refactoring

Parents: e4686fd8, 1dd6c051
Changes: 17 files
include/lib/xlat_tables/xlat_mmu_helpers.h
...
...
@@ -41,10 +41,28 @@
*/
#define XLAT_TABLE_NC (U(1) << 1)
/*
* Offsets into a mmu_cfg_params array generated by setup_mmu_cfg(). All
* parameters are 64 bits wide.
*/
#define MMU_CFG_MAIR 0
#define MMU_CFG_TCR 1
#define MMU_CFG_TTBR0 2
#define MMU_CFG_PARAM_MAX 3
#ifndef __ASSEMBLY__
#include <sys/types.h>
/*
* Return the values that the MMU configuration registers must contain for the
* specified translation context. `params` must be a pointer to array of size
* MMU_CFG_PARAM_MAX.
*/
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime);
#ifdef AARCH32
/* AArch32 specific translation table API */
void enable_mmu_secure(unsigned int flags);
...
...
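In practice, the new interface makes the caller own the output buffer: setup_mmu_cfg() fills three 64-bit slots (MAIR, TCR, TTBR0) in an array the caller passes in, instead of writing into a library-internal 32-bit array. A minimal host-side sketch of the new convention follows; setup_mmu_cfg_stub() and the register values are illustrative stand-ins, not code from this commit.

#include <stdint.h>
#include <stdio.h>

/* Offsets copied from the hunk above; every slot is 64 bits wide. */
#define MMU_CFG_MAIR		0
#define MMU_CFG_TCR		1
#define MMU_CFG_TTBR0		2
#define MMU_CFG_PARAM_MAX	3

/* Stub standing in for the library's setup_mmu_cfg(); the real function
 * derives MAIR/TCR/TTBR0 from the translation context and its regime. */
static void setup_mmu_cfg_stub(uint64_t *params)
{
	params[MMU_CFG_MAIR]  = 0x00000000004404ffULL; /* illustrative value */
	params[MMU_CFG_TCR]   = 0x0000000080803520ULL; /* illustrative value */
	params[MMU_CFG_TTBR0] = 0x0000000080001000ULL; /* illustrative value */
}

int main(void)
{
	/* The caller now owns the output array and passes it in. */
	uint64_t params[MMU_CFG_PARAM_MAX];

	setup_mmu_cfg_stub(params);

	printf("MAIR  = 0x%016llx\n", (unsigned long long)params[MMU_CFG_MAIR]);
	printf("TCR   = 0x%016llx\n", (unsigned long long)params[MMU_CFG_TCR]);
	printf("TTBR0 = 0x%016llx\n", (unsigned long long)params[MMU_CFG_TTBR0]);
	return 0;
}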
include/lib/xlat_tables/xlat_tables_defs.h
...
...
@@ -62,7 +62,7 @@
/*
* The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
- * 64KB. However, TF only supports the 4KB case at the moment.
+ * 64KB. However, only 4KB are supported at the moment.
*/
#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
#define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
...
...
include/lib/xlat_tables/xlat_tables_v2.h
...
...
@@ -121,10 +121,12 @@ typedef struct mmap_region {
} mmap_region_t;
/*
- * Translation regimes supported by this library.
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect it at runtime.
*/
#define EL1_EL0_REGIME 1
#define EL3_REGIME 3
#define EL_REGIME_INVALID -1
/*
* Declare the translation context type.
...
...
@@ -165,8 +167,7 @@ typedef struct xlat_ctx xlat_ctx_t;
(_xlat_tables_count), \
(_virt_addr_space_size), \
(_phy_addr_space_size), \
-		IMAGE_XLAT_DEFAULT_REGIME, \
-		"xlat_table")
+		EL_REGIME_INVALID, "xlat_table")
/*
* Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
...
...
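For reference, a registration site would now look roughly like the sketch below; the context name and sizes are hypothetical, and the only point is that the regime baked into the expansion is EL_REGIME_INVALID so it can be resolved at run time by init_xlat_tables_ctx().

/* Hypothetical usage; real platforms take these sizes from platform_def.h. */
#include <xlat_tables_v2.h>

REGISTER_XLAT_CONTEXT(example_ctx,
		      16,		/* _mmap_count */
		      8,		/* _xlat_tables_count */
		      (1ULL << 32),	/* _virt_addr_space_size */
		      (1ULL << 32));	/* _phy_addr_space_size */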
include/lib/xlat_tables/xlat_tables_v2_helpers.h
...
...
@@ -16,13 +16,6 @@
#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
#endif
-/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
-#define MMU_CFG_MAIR0		0
-#define MMU_CFG_TCR		1
-#define MMU_CFG_TTBR0_LO	2
-#define MMU_CFG_TTBR0_HI	3
-#define MMU_CFG_PARAM_MAX	4
#ifndef __ASSEMBLY__
#include <cassert.h>
...
...
@@ -31,9 +24,6 @@
#include <xlat_tables_arch.h>
#include <xlat_tables_defs.h>
-/* Parameters of register values required when enabling MMU */
-extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/* Forward declaration */
struct mmap_region;
...
...
@@ -172,29 +162,4 @@ struct xlat_ctx {
#endif /*__ASSEMBLY__*/
-#if AARCH64
-/*
- * This IMAGE_EL macro must not to be used outside the library, and it is only
- * used in AArch64.
- */
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
-# define IMAGE_EL			3
-# define IMAGE_XLAT_DEFAULT_REGIME	EL3_REGIME
-#else
-# define IMAGE_EL			1
-# define IMAGE_XLAT_DEFAULT_REGIME	EL1_EL0_REGIME
-#endif
-#else /* if AARCH32 */
-/*
- * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime in
- * AArch64 except for the XN bits, but we set and unset them at the same time,
- * so there's no difference in practice.
- */
-#define IMAGE_XLAT_DEFAULT_REGIME	EL1_EL0_REGIME
-#endif /* AARCH64 */

#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
lib/xlat_tables/xlat_tables_common.c
...
...
@@ -195,6 +195,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+	/*
+	 * Always set the access flag, as this library assumes access flag
+	 * faults aren't managed.
+	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);
	desc |= ap1_mask;
...
...
@@ -222,9 +226,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
-		 * (Trusted Firmware doesn't self-modify its code, therefore
-		 * R/W memory is reserved for data storage, which must not be
-		 * executable.)
+		 * This library assumes that it is used by software that does
+		 * not self-modify its code, therefore R/W memory is reserved
+		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
...
...
lib/xlat_tables_v2/aarch32/enable_mmu.S
...
...
@@ -24,17 +24,17 @@ func enable_mmu_direct
	mov	r3, r0
	ldr	r0, =mmu_cfg_params

-	/* MAIR0 */
-	ldr	r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+	/* MAIR0. Only the lower 32 bits are used. */
+	ldr	r1, [r0, #(MMU_CFG_MAIR << 3)]
	stcopr	r1, MAIR0

-	/* TTBCR */
-	ldr	r2, [r0, #(MMU_CFG_TCR << 2)]
+	/* TTBCR. Only the lower 32 bits are used. */
+	ldr	r2, [r0, #(MMU_CFG_TCR << 3)]
	stcopr	r2, TTBCR

	/* TTBR0 */
-	ldr	r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
-	ldr	r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+	ldr	r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+	ldr	r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
	stcopr16	r1, r2, TTBR0_64

	/* TTBR1 is unused right now; set it to 0. */
...
...
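Because each mmu_cfg_params slot is now 64 bits wide, the AArch32 code above addresses entry i at byte offset (i << 3) and fetches TTBR0 as two 32-bit words at offsets (i << 3) and (i << 3) + 4. A small host-side sketch of the same addressing, assuming the little-endian layout the firmware uses (the value below is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MMU_CFG_TTBR0		2
#define MMU_CFG_PARAM_MAX	3

int main(void)
{
	/* 64-bit slots, as in the new mmu_cfg_params layout. */
	uint64_t params[MMU_CFG_PARAM_MAX] = {0};
	params[MMU_CFG_TTBR0] = 0x000000ab80001000ULL; /* illustrative value */

	/* What the ldr/stcopr16 sequence does: read the low and high 32-bit
	 * words at byte offsets (index << 3) and (index << 3) + 4.
	 * This assumes little-endian data, as used by the firmware. */
	const uint8_t *base = (const uint8_t *)params;
	uint32_t lo, hi;
	memcpy(&lo, base + (MMU_CFG_TTBR0 << 3), sizeof(lo));
	memcpy(&hi, base + (MMU_CFG_TTBR0 << 3) + 4, sizeof(hi));

	printf("TTBR0 lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}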
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
...
...
@@ -18,16 +18,14 @@
#error ARMv7 target does not support LPAE MMU descriptors
#endif
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
 * Returns 1 if the provided granule size is supported, 0 otherwise.
 */
int xlat_arch_is_granule_size_supported(size_t size)
{
	/*
-	 * The Trusted Firmware uses long descriptor translation table format,
-	 * which supports 4 KiB pages only.
+	 * The library uses the long descriptor translation table format, which
+	 * supports 4 KiB pages only.
	 */
	return (size == (4U * 1024U));
}
...
...
@@ -50,18 +48,12 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
	return (read_sctlr() & SCTLR_M_BIT) != 0;
}

-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
{
-	/*
-	 * Ensure the translation table write has drained into memory before
-	 * invalidating the TLB entry.
-	 */
-	dsbishst();
-	tlbimvaais(TLBI_ADDR(va));
+	return UPPER_ATTRS(XN);
}

-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
{
	/*
	 * Ensure the translation table write has drained into memory before
...
...
@@ -103,29 +95,32 @@ int xlat_arch_current_el(void)
	/*
	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+	 *
+	 * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime
+	 * in AArch64 except for the XN bits, but we set and unset them at the
+	 * same time, so there's no difference in practice.
	 */
-	return 3;
+	return 1;
}
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* have already been created.
******************************************************************************/
-void setup_mmu_cfg(unsigned int flags,
-		   const uint64_t *base_table,
-		   unsigned long long max_pa,
-		   uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+		   const uint64_t *base_table, unsigned long long max_pa,
+		   uintptr_t max_va, __unused int xlat_regime)
{
-	u_register_t mair0, ttbcr;
-	uint64_t ttbr0;
+	uint64_t mair, ttbr0;
+	uint32_t ttbcr;

	assert(IS_IN_SECURE());

	/* Set attributes in the right indices of the MAIR */
-	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
-	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
+	mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
/*
...
...
@@ -173,17 +168,17 @@ void setup_mmu_cfg(unsigned int flags,
/* Set TTBR0 bits as well */
	ttbr0 = (uint64_t)(uintptr_t) base_table;

#if ARM_ARCH_AT_LEAST(8, 2)
	/*
-	 * Enable CnP bit so as to share page tables with all PEs.
-	 * Mandatory for ARMv8.2 implementations.
+	 * Enable CnP bit so as to share page tables with all PEs. This
+	 * is mandatory for ARMv8.2 implementations.
	 */
	ttbr0 |= TTBR_CNP_BIT;
#endif

	/* Now populate MMU configuration */
-	mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
-	mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
-	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
-	mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
+	params[MMU_CFG_MAIR] = mair;
+	params[MMU_CFG_TCR] = (uint64_t) ttbcr;
+	params[MMU_CFG_TTBR0] = ttbr0;
}
lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
deleted 100644 → 0
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
#define __XLAT_TABLES_ARCH_PRIVATE_H__
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
/*
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
*/
static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
{
	return UPPER_ATTRS(XN);
}

#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
lib/xlat_tables_v2/aarch64/enable_mmu.S
...
...
@@ -43,17 +43,15 @@
	ldr	x0, =mmu_cfg_params

	/* MAIR */
-	ldr	w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+	ldr	x1, [x0, #(MMU_CFG_MAIR << 3)]
	_msr	mair, \el, x1

	/* TCR */
-	ldr	w2, [x0, #(MMU_CFG_TCR << 2)]
+	ldr	x2, [x0, #(MMU_CFG_TCR << 3)]
	_msr	tcr, \el, x2

	/* TTBR */
-	ldr	w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
-	ldr	w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
-	orr	x3, x3, x4, lsl #32
+	ldr	x3, [x0, #(MMU_CFG_TTBR0 << 3)]
	_msr	ttbr0, \el, x3

	/*
...
...
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
...
...
@@ -13,8 +13,6 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
...
...
@@ -113,19 +111,17 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
}
}
-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
-#endif
+	if (xlat_regime == EL1_EL0_REGIME) {
+		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+	} else {
+		assert(xlat_regime == EL3_REGIME);
+		return UPPER_ATTRS(XN);
+	}
}

-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
...
...
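This function replaces the static inline helper from the now-deleted xlat_tables_arch_private.h headers (their removal appears elsewhere in this commit). A standalone sketch of the same selection is shown below; EL1_EL0_REGIME and EL3_REGIME come from xlat_tables_v2.h, while the descriptor bit positions are assumptions based on the Armv8-A VMSA, not values quoted from this diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Regime identifiers from xlat_tables_v2.h. */
#define EL1_EL0_REGIME	1
#define EL3_REGIME	3

/* Upper-attribute bit positions assumed from the Armv8-A VMSA; the library
 * expresses them through UPPER_ATTRS()/UXN/PXN/XN rather than raw shifts. */
#define DESC_UXN	(1ULL << 54)	/* EL1&0 regime: unprivileged execute-never */
#define DESC_PXN	(1ULL << 53)	/* EL1&0 regime: privileged execute-never */
#define DESC_XN		(1ULL << 54)	/* EL3 regime: execute-never */

/* Mirrors the selection logic of xlat_arch_regime_get_xn_desc() above. */
static uint64_t regime_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME)
		return DESC_UXN | DESC_PXN;

	assert(xlat_regime == EL3_REGIME);
	return DESC_XN;
}

int main(void)
{
	printf("EL1&0 XN mask: 0x%016llx\n",
	       (unsigned long long)regime_xn_desc(EL1_EL0_REGIME));
	printf("EL3   XN mask: 0x%016llx\n",
	       (unsigned long long)regime_xn_desc(EL3_REGIME));
	return 0;
}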
@@ -182,12 +178,11 @@ int xlat_arch_current_el(void)
	return el;
}

-void setup_mmu_cfg(unsigned int flags,
-		   const uint64_t *base_table,
-		   unsigned long long max_pa,
-		   uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+		   const uint64_t *base_table, unsigned long long max_pa,
+		   uintptr_t max_va, int xlat_regime)
{
-	uint64_t mair, ttbr, tcr;
+	uint64_t mair, ttbr0, tcr;
	uintptr_t virtual_addr_space_size;
/* Set attributes in the right indices of the MAIR. */
...
...
@@ -195,8 +190,6 @@ void setup_mmu_cfg(unsigned int flags,
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

-	ttbr = (uint64_t) base_table;
/*
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
...
...
@@ -232,30 +225,29 @@ void setup_mmu_cfg(unsigned int flags,
*/
	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);

-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
+	if (xlat_regime == EL1_EL0_REGIME) {
		/*
-		 * TCR_EL1.EPD1: Disable translation table walk for addresses that are
-		 * translated using TTBR1_EL1.
+		 * TCR_EL1.EPD1: Disable translation table walk for addresses
+		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
+	} else {
+		assert(xlat_regime == EL3_REGIME);
		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-#endif
-	mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
-	mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
+	}

	/* Set TTBR bits as well */
-	if (ARM_ARCH_AT_LEAST(8, 2)) {
+	ttbr0 = (uint64_t) base_table;
+#if ARM_ARCH_AT_LEAST(8, 2)
	/*
	 * Enable CnP bit so as to share page tables with all PEs. This
	 * is mandatory for ARMv8.2 implementations.
	 */
-		ttbr |= TTBR_CNP_BIT;
-	}
+	ttbr0 |= TTBR_CNP_BIT;
+#endif

-	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
-	mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t)(ttbr >> 32);
+	params[MMU_CFG_MAIR] = mair;
+	params[MMU_CFG_TCR] = tcr;
+	params[MMU_CFG_TTBR0] = ttbr0;
}
lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
deleted 100644 → 0
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
#define __XLAT_TABLES_ARCH_PRIVATE_H__
#include <assert.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
/*
* Return the execute-never mask that will prevent instruction fetch at all ELs
* that are part of the given translation regime.
*/
static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
{
	if (regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
	} else {
		assert(regime == EL3_REGIME);
		return UPPER_ATTRS(XN);
	}
}

#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
lib/xlat_tables_v2/xlat_tables.mk
...
...
@@ -10,5 +10,3 @@ XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
			xlat_tables_context.c		\
			xlat_tables_core.c		\
			xlat_tables_utils.c)

-INCLUDES	+=	-Ilib/xlat_tables_v2/${ARCH}
lib/xlat_tables_v2/xlat_tables_context.c
...
...
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <debug.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
...
...
@@ -11,6 +12,12 @@
#include "xlat_tables_private.h"
/*
* MMU configuration register values for the active translation context. Used
* from the MMU assembly helpers.
*/
uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
/*
* Each platform can define the size of its physical and virtual address spaces.
* If the platform hasn't defined one or both of them, default to
...
...
@@ -69,6 +76,17 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
void init_xlat_tables(void)
{
+	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+	int current_el = xlat_arch_current_el();
+
+	if (current_el == 1) {
+		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+	} else {
+		assert(current_el == 3);
+		tf_xlat_ctx.xlat_regime = EL3_REGIME;
+	}
+
	init_xlat_tables_ctx(&tf_xlat_ctx);
}
...
...
@@ -93,8 +111,9 @@ void init_xlat_tables(void)
void enable_mmu_secure(unsigned int flags)
{
-	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, tf_xlat_ctx.base_table,
+		      MAX_PHYS_ADDR, tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct(flags);
}
...
...
@@ -102,15 +121,17 @@ void enable_mmu_secure(unsigned int flags)
void enable_mmu_el1(unsigned int flags)
{
-	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, tf_xlat_ctx.base_table,
+		      MAX_PHYS_ADDR, tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
	enable_mmu_direct_el1(flags);
}

void enable_mmu_el3(unsigned int flags)
{
-	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags, tf_xlat_ctx.base_table,
+		      MAX_PHYS_ADDR, tf_xlat_ctx.va_max_address, EL3_REGIME);
	enable_mmu_direct_el3(flags);
}
...
...
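Putting the pieces together, the expected flow after this change is: the library context is registered with EL_REGIME_INVALID, init_xlat_tables() resolves the regime from the current EL, and the enable_mmu_* wrapper fills mmu_cfg_params through setup_mmu_cfg() before calling the assembly helper. A hypothetical EL3 image setup path might look like the sketch below; the function name and the mapped region are illustrative, not part of this commit.

#include <xlat_tables_v2.h>

void example_plat_arch_setup(void)
{
	/* Describe the image's memory map (illustrative region). */
	mmap_add_region(0x80000000, 0x80000000, 0x00100000,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Regime is detected at runtime: EL3_REGIME when running at EL3. */
	init_xlat_tables();

	/* Fills mmu_cfg_params[] with MAIR/TCR/TTBR0 and turns the MMU on. */
	enable_mmu_el3(0);
}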
lib/xlat_tables_v2/xlat_tables_core.c
...
...
@@ -12,7 +12,6 @@
#include <string.h>
#include <types.h>
#include <utils_def.h>
-#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
...
...
@@ -104,12 +103,14 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
*/
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
-	 * Always set the access flag, as TF doesn't manage access flag faults.
+	 * Always set the access flag, as this library assumes access flag
+	 * faults aren't managed.
+	 */
+	desc |= LOWER_ATTRS(ACCESS_FLAG);
+	/*
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
-	desc |= LOWER_ATTRS(ACCESS_FLAG);
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
...
...
@@ -155,9 +156,10 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
-		 * (Trusted Firmware doesn't self-modify its code, therefore
-		 * R/W memory is reserved for data storage, which must not be
-		 * executable.)
+		 * This library assumes that it is used by software that does
+		 * not self-modify its code, therefore R/W memory is reserved
+		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which makes any writable memory region to be treated as
...
...
@@ -311,7 +313,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
-			xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
+			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
...
...
@@ -327,7 +329,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
*/
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
-				xlat_arch_tlbi_va_regime(table_idx_va,
+				xlat_arch_tlbi_va(table_idx_va,
						ctx->xlat_regime);
			}
...
...
lib/xlat_tables_v2/xlat_tables_private.h
...
...
@@ -35,22 +35,24 @@
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/*
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
*/
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
/*
* Invalidate all TLB entries that match the given virtual address. This
* operation applies to all PEs in the same Inner Shareable domain as the PE
* that executes this function. This functions must be called for every
- * translation table entry that is modified.
- *
- * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
- * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
- * the given translation regime.
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
*
* Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
* pertaining to a higher exception level, e.g. invalidating EL3 entries from
* S-EL1.
*/
-void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
/*
* This function has to be called at the end of any code that uses the function
...
...
@@ -86,10 +88,6 @@ int xlat_arch_current_el(void);
*/
unsigned long long xlat_arch_get_max_supported_pa(void);

-/* Enable MMU and configure it to use the specified translation tables. */
-void setup_mmu_cfg(unsigned int flags,
-		   const uint64_t *base_table,
-		   unsigned long long max_pa,
-		   uintptr_t max_va);
/*
* Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
* is enabled, 0 otherwise.
...
...
lib/xlat_tables_v2/xlat_tables_utils.c
...
...
@@ -11,7 +11,6 @@
#include <platform_def.h>
#include <types.h>
#include <utils_def.h>
-#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
...
...
@@ -544,7 +543,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
		*entry = INVALID_DESC;

		/* Invalidate any cached copy of this mapping in the TLBs. */
-		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();
...
...
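The call above follows the pattern used throughout the library after this change: write the descriptor, invalidate the matching TLB entries for the context's regime, then wait for completion. A simplified, self-contained sketch of that sequence (the stubs and the INVALID_DESC value are illustrative, not the library's definitions):

#include <stdint.h>
#include <stdio.h>

#define INVALID_DESC	0ULL	/* simplified; the library defines its own value */

/* Stubs standing in for the library's architecture-specific helpers. */
static void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	printf("TLBI for VA 0x%lx, regime %d\n", (unsigned long)va, xlat_regime);
}

static void xlat_arch_tlbi_va_sync(void)
{
	printf("barrier: wait for the invalidation to complete\n");
}

/* The recurring pattern: update the descriptor, invalidate, synchronise. */
static void invalidate_entry(uint64_t *entry, uintptr_t va, int xlat_regime)
{
	*entry = INVALID_DESC;
	xlat_arch_tlbi_va(va, xlat_regime);
	xlat_arch_tlbi_va_sync();
}

int main(void)
{
	uint64_t fake_table_entry = 0x40000000000705ULL; /* illustrative */

	invalidate_entry(&fake_table_entry, 0x40000000UL, 3 /* EL3_REGIME */);
	return 0;
}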
services/std_svc/spm/sp_setup.c
...
...
@@ -107,38 +107,22 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* MMU-related registers
* ---------------------
*/
+	xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;

-	/* Set attributes in the right indices of the MAIR */
-	u_register_t mair_el1 = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
-		MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
-		MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
-
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);
-
-	/* Setup TCR_EL1. */
-	u_register_t tcr_ps_bits = tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
-	u_register_t tcr_el1 =
-		/* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
-		(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE))		|
-		/* Inner and outer WBWA, shareable. */
-		TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA	|
-		/* Set the granularity to 4KB. */
-		TCR_TG0_4K							|
-		/* Limit Intermediate Physical Address Size. */
-		tcr_ps_bits << TCR_EL1_IPS_SHIFT				|
-		/* Disable translations using TBBR1_EL1. */
-		TCR_EPD1_BIT
-		/* The remaining fields related to TBBR1_EL1 are left as zero. */
-		;
-
-	tcr_el1 &= ~(
-		/* Enable translations using TBBR0_EL1 */
-		TCR_EPD0_BIT
-	);
-
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);
+	uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+		      xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+		      EL1_EL0_REGIME);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+		      mmu_cfg_params[MMU_CFG_MAIR]);
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+		      mmu_cfg_params[MMU_CFG_TCR]);
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+		      mmu_cfg_params[MMU_CFG_TTBR0]);

	/* Setup SCTLR_EL1 */
	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
...
...
@@ -174,13 +158,6 @@ void spm_sp_setup(sp_context_t *sp_ctx)
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

-	uint64_t *xlat_base = ((xlat_ctx_t *)sp_ctx->xlat_ctx_handle)->base_table;
-
-	/* Point TTBR0_EL1 at the tables of the context created for the SP. */
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1, (u_register_t) xlat_base);
/*
* Setup other system registers
* ----------------------------
...
...