adam.huang / Arm Trusted Firmware / Commits

Commit 28ee754d
Authored Mar 16, 2017 by davidcunado-arm, committed by GitHub on Mar 16, 2017

Merge pull request #856 from antonio-nino-diaz-arm/an/dynamic-xlat

Introduce version 2 of the translation tables library

Parents: fa971fca bf75a371
Changes: 37
lib/cpus/aarch64/cortex_a57.S
...
...
@@ -114,6 +114,21 @@ func check_errata_806969
	b	cpu_rev_var_ls
endfunc check_errata_806969

	/* ---------------------------------------------------
	 * Errata Workaround for Cortex A57 Errata #813419.
	 * This applies only to revision r0p0 of Cortex A57.
	 * ---------------------------------------------------
	 */
func check_errata_813419
	/*
	 * Even though this is only needed for revision r0p0, it
	 * is always applied due to limitations of the current
	 * errata framework.
	 */
	mov	x0, #ERRATA_APPLIES
	ret
endfunc check_errata_813419

	/* ---------------------------------------------------
	 * Errata Workaround for Cortex A57 Errata #813420.
	 * This applies only to revision r0p0 of Cortex A57.
...
...
@@ -482,6 +497,7 @@ func cortex_a57_errata_report
	 * checking functions of each errata.
	 */
	report_errata ERRATA_A57_806969, cortex_a57, 806969
	report_errata ERRATA_A57_813419, cortex_a57, 813419
	report_errata ERRATA_A57_813420, cortex_a57, 813420
	report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
		disable_ldnp_overread
...
...
lib/cpus/cpu-ops.mk
...
...
@@ -70,6 +70,10 @@ ERRATA_A53_836870 ?=0
# only to revision r0p0 of the Cortex A57 cpu.
ERRATA_A57_806969	?=0

# Flag to apply erratum 813419 workaround during reset. This erratum applies
# only to revision r0p0 of the Cortex A57 cpu.
ERRATA_A57_813419	?=0

# Flag to apply erratum 813420 workaround during reset. This erratum applies
# only to revision r0p0 of the Cortex A57 cpu.
ERRATA_A57_813420	?=0
...
...
@@ -106,6 +110,10 @@ $(eval $(call add_define,ERRATA_A53_836870))
$(eval $(call assert_boolean,ERRATA_A57_806969))
$(eval $(call add_define,ERRATA_A57_806969))

# Process ERRATA_A57_813419 flag
$(eval $(call assert_boolean,ERRATA_A57_813419))
$(eval $(call add_define,ERRATA_A57_813419))

# Process ERRATA_A57_813420 flag
$(eval $(call assert_boolean,ERRATA_A57_813420))
$(eval $(call add_define,ERRATA_A57_813420))
...
...
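Like the existing errata flags, the new ones default to 0 and are validated as booleans, so a platform opts in per erratum on the make command line. A hypothetical invocation (the platform name is a placeholder for illustration only):

    make PLAT=fvp ERRATA_A57_813419=1 ERRATA_A57_813420=1 all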
lib/xlat_tables/aarch64/xlat_tables.c
...
...
@@ -208,7 +208,7 @@ void init_xlat_tables(void)
 		/* into memory, the TLB invalidation is complete, */	\
 		/* and translation register writes are committed */	\
 		/* before enabling the MMU */				\
-		dsb();							\
+		dsbish();						\
 		isb();							\
 									\
 		sctlr = read_sctlr_el##_el();				\
...
...
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#if DEBUG
static unsigned long long xlat_arch_get_max_supported_pa(void)
{
	/* Physical address space size for long descriptor format. */
	return (1ull << 40) - 1ull;
}
#endif /* DEBUG */

int is_mmu_enabled(void)
{
	return (read_sctlr() & SCTLR_M_BIT) != 0;
}
#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	tlbimvaais(TLBI_ADDR(va));
}

void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
	       xlat_arch_get_max_supported_pa());
}
/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
{
	u_register_t mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties as Inner
	 * & outer WBWA & shareable. Disable TTBR1.
	 */
	ttbcr = TTBCR_EAE_BIT |
		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
		TTBCR_RGN0_INNER_WBA |
		(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uint64_t)(uintptr_t) base_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsb();
	isb();

	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}

void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
	enable_mmu_internal_secure(flags, base_table);
}
lib/xlat_tables_v2/aarch32/xlat_tables_arch.h
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_ARCH_H__
#define __XLAT_TABLES_ARCH_H__
#include <arch.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
/*
* In AArch32 state, the MMU only supports 4KB page granularity, which means
* that the first translation table level is either 1 or 2. Both of them are
* allowed to have block and table descriptors. See section G4.5.6 of the
* ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
*
* The define below specifies the first table level that allows block
* descriptors.
*/
#define MIN_LVL_BLOCK_DESC 1
/*
* Each platform can define the size of the virtual address space, which is
* defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
* the width of said address space. The value of TTBCR.TxSZ must be in the
* range 0 to 7 [1], which means that the virtual address space width must be
* in the range 32 to 25 bits.
*
* Here we calculate the initial lookup level from the value of
* PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
* address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
* narrower address spaces are not supported. As a result, level 3 cannot be
* used as initial lookup level with 4 KB granularity [1].
*
* For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
* 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
* G4-5 in the ARM ARM, the initial lookup level for an address space like that
* is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* [1] Section G4.6.5
*/
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
#endif /* __XLAT_TABLES_ARCH_H__ */
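A quick worked check of the selection logic above, written as standalone C11 static assertions (the address-space value is hypothetical; the shifts assume the standard 4KB long-descriptor geometry, i.e. L1_XLAT_ADDRESS_SHIFT == 30 and L2_XLAT_ADDRESS_SHIFT == 21):

#define EXAMPLE_VIRT_ADDR_SPACE_SIZE	(1ULL << 30)	/* 30-bit VA space */

/* TTBCR.T0SZ = 32 - 30 = 2, inside the valid 0 to 7 range. */
_Static_assert((32 - 30) <= 7, "TxSZ out of range");

/* 30 bits falls in the level 2 range (30 to 25 bits) described above, so
 * the base level is 2 and the base table needs
 * (1ULL << 30) >> 21 == 512 entries. */
_Static_assert((EXAMPLE_VIRT_ADDR_SPACE_SIZE >> 21) == 512, "entry count");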
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
# define IMAGE_EL 3
#else
# define IMAGE_EL 1
#endif
static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
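For instance (a worked example; it assumes the ADDR_MASK_* macros cover exactly the PA bit ranges their names indicate):

/* Highest mapped physical address has bit 35 set (a 32GB physical map). */
unsigned long long max_pa_example = 1ULL << 35;

/* Bits 44-47, 42-43, 40-41 and 36-39 are all clear, so the function falls
 * through to the ADDR_MASK_32_TO_35 test and returns TCR_PS_BITS_64GB:
 * TCR is programmed for a 36-bit (64GB) physical address range. */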
#if DEBUG
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011,
	PARANGE_0100, PARANGE_0101
};

unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* DEBUG */
int is_mmu_enabled(void)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
#endif
}
#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tlbivae3is(TLBI_ADDR(va));
#endif
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
	       xlat_arch_get_max_supported_pa());

	/*
	 * If dynamic allocation of new regions is enabled the code can't make
	 * assumptions about the max physical address because it could change
	 * after adding new regions. If this functionality is disabled it is
	 * safer to restrict the max physical address as much as possible.
	 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}
/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
*
* _el: Exception level at which the function will run
* _tcr_extra: Extra bits to set in the TCR register. This mask will
* be OR'ed with the default TCR value.
* _tlbi_fct: Function to invalidate the TLBs at the current
* exception level
******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_internal_el##_el(unsigned int flags,		\
					uint64_t *base_table)		\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable. */			\
		/* Set T0SZ to (64 - width of virtual address space) */\
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA |				\
			(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_table;				\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}
/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
#endif
void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	enable_mmu_internal_el1(flags, base_table);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	enable_mmu_internal_el3(flags, base_table);
#endif
}
lib/xlat_tables_v2/aarch64/xlat_tables_arch.h
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_ARCH_H__
#define __XLAT_TABLES_ARCH_H__
#include <arch.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
/*
* In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
* granularity. For 4KB granularity, a level 0 table descriptor doesn't support
* block translation. For 16KB, the same thing happens to levels 0 and 1. For
* 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
* Reference Manual (DDI 0487A.k) for more information.
*
* The define below specifies the first table level that allows block
* descriptors.
*/
#if PAGE_SIZE == (4*1024)
/* 4KB */
# define MIN_LVL_BLOCK_DESC 1
#else
/* 16KB or 64KB */
# define MIN_LVL_BLOCK_DESC 2
#endif
/*
* Each platform can define the size of the virtual address space, which is
* defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
* width of said address space. The value of TCR.TxSZ must be in the range 16
* to 39 [1], which means that the virtual address space width must be in the
* range 48 to 25 bits.
*
* Here we calculate the initial lookup level from the value of
* PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
* address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
* from 30 to 25. Wider or narrower address spaces are not supported. As a
* result, level 3 cannot be used as initial lookup level with 4 KB
* granularity. [2]
*
* For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
* 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
* D4-11 in the ARM ARM, the initial lookup level for an address space like
* that is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* [1] Page 1730: 'Input address size', 'For all translation stages'.
* [2] Section D4.2.5
*/
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 0
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
#endif /* __XLAT_TABLES_ARCH_H__ */
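Extending the 35-bit example from the comment above into standalone C11 static assertions (the value is hypothetical; it assumes the standard 4KB-granule shifts, L0_XLAT_ADDRESS_SHIFT == 39 and L1_XLAT_ADDRESS_SHIFT == 30):

#define EXAMPLE_VIRT_ADDR_SPACE_SIZE	(1ULL << 35)	/* 35-bit VA space */

/* TCR.T0SZ = 64 - 35 = 29, inside the valid 16 to 39 range. */
_Static_assert((64 - 35) >= 16 && (64 - 35) <= 39, "TxSZ out of range");

/* 35 bits is not wider than the 39 bits a single level 0 entry covers, but
 * is wider than the 30 bits of a level 1 entry, so the base level is 1 and
 * the base table needs (1ULL << 35) >> 30 == 32 entries. */
_Static_assert((EXAMPLE_VIRT_ADDR_SPACE_SIZE >> 30) == 32, "entry count");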
lib/xlat_tables_v2/xlat_tables.mk
0 → 100644
#
# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
				${ARCH}/xlat_tables_arch.c		\
				xlat_tables_common.c			\
				xlat_tables_internal.c)
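A platform build would pull these sources in from its makefile; a minimal sketch (the include path comes from this commit, BL31_SOURCES is standard Trusted Firmware build machinery, and registering PLAT_XLAT_TABLES_DYNAMIC this way is an assumption of this example):

include lib/xlat_tables_v2/xlat_tables.mk

BL31_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}

# Hypothetical: opt this platform in to runtime (dynamic) regions.
$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))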
lib/xlat_tables_v2/xlat_tables_common.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#ifdef AARCH32
# include "aarch32/xlat_tables_arch.h"
#else
# include "aarch64/xlat_tables_arch.h"
#endif
#include "xlat_tables_private.h"
/*
 * Private variables used by the TF
 */
static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];

static uint64_t tf_xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");

static uint64_t tf_base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

#if PLAT_XLAT_TABLES_DYNAMIC
static int xlat_tables_mapped_regions[MAX_XLAT_TABLES];
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

xlat_ctx_t tf_xlat_ctx = {

	.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
	.va_max_address = PLAT_VIRT_ADDR_SPACE_SIZE - 1,

	.mmap = tf_mmap,
	.mmap_num = MAX_MMAP_REGIONS,

	.tables = tf_xlat_tables,
	.tables_num = MAX_XLAT_TABLES,
#if PLAT_XLAT_TABLES_DYNAMIC
	.tables_mapped_regions = xlat_tables_mapped_regions,
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

	.base_table = tf_base_xlat_table,
	.base_table_entries = NUM_BASE_LEVEL_ENTRIES,

	.max_pa = 0,
	.max_va = 0,

	.next_table = 0,

	.base_level = XLAT_TABLE_LEVEL_BASE,

	.initialized = 0
};
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
			size_t size, unsigned int attr)
{
	mmap_region_t mm = {
		.base_va = base_va,
		.base_pa = base_pa,
		.size = size,
		.attr = attr,
	};
	mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)&mm);
}
void mmap_add(const mmap_region_t *mm)
{
	while (mm->size) {
		mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)mm);
		mm++;
	}
}
#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa,
			    uintptr_t base_va, size_t size, unsigned int attr)
{
	mmap_region_t mm = {
		.base_va = base_va,
		.base_pa = base_pa,
		.size = size,
		.attr = attr,
	};
	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
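These two wrappers are the whole runtime-mapping API for the default context. A minimal sketch of their use after the MMU is up (DEVICE_BASE and DEVICE_SIZE are placeholder names; the attributes are the MT_* flags used throughout this library, and error codes come from mmap_add_region_check()):

/* Identity-map a device region at runtime, secure and read-write. */
int ret = mmap_add_dynamic_region(DEVICE_BASE,	/* base_pa */
				  DEVICE_BASE,	/* base_va */
				  DEVICE_SIZE,	/* multiple of PAGE_SIZE */
				  MT_DEVICE | MT_RW | MT_SECURE);
if (ret != 0)
	ERROR("Failed to map device region: %d\n", ret);

/* ... use the device ... */

/* Unmap it again; base_va and size must match the add call exactly. */
ret = mmap_remove_dynamic_region(DEVICE_BASE, DEVICE_SIZE);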
void init_xlat_tables(void)
{
	assert(!is_mmu_enabled());
	assert(!tf_xlat_ctx.initialized);
	print_mmap(tf_xlat_ctx.mmap);
	init_xlation_table(&tf_xlat_ctx);
	xlat_tables_print(&tf_xlat_ctx);

	assert(tf_xlat_ctx.max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(tf_xlat_ctx.max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);

	init_xlat_tables_arch(tf_xlat_ctx.max_pa);
}
#ifdef AARCH32

void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

void enable_mmu_el3(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

#endif /* AARCH32 */
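Putting the public interface together, a typical boot-time sequence against the default context looks like the sketch below (region bases, sizes and names are placeholders; passing 0 to enable_mmu_el3() keeps the data cache enabled, per the DISABLE_DCACHE flag handling above):

/* Describe the static memory layout before building the tables. */
mmap_add_region(FW_BASE, FW_BASE, FW_SIZE,
		MT_MEMORY | MT_RW | MT_SECURE);
mmap_add_region(UART_BASE, UART_BASE, UART_SIZE,
		MT_DEVICE | MT_RW | MT_SECURE);

/* Build the translation tables from the regions added so far. */
init_xlat_tables();

/* Turn the MMU on at the current exception level (EL3 in BL31). */
enable_mmu_el3(0);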
lib/xlat_tables_v2/xlat_tables_internal.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#ifdef AARCH32
# include "aarch32/xlat_tables_arch.h"
#else
# include "aarch64/xlat_tables_arch.h"
#endif
#include "xlat_tables_private.h"
#if PLAT_XLAT_TABLES_DYNAMIC
/*
* The following functions assume that they will be called using subtables only.
* The base table can't be unmapped, so it is not needed to do any special
* handling for it.
*/
/*
* Returns the index of the array corresponding to the specified translation
* table.
*/
static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
{
	for (int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables[i] == table)
			return i;

	/*
	 * Maybe we were asked to get the index of the base level table, which
	 * should never happen.
	 */
	assert(0);

	return -1;
}
/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	for (int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables_mapped_regions[i] == 0)
			return ctx->tables[i];

	return NULL;
}
/* Increments region count for a given table. */
static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
}
/* Decrements region count for a given table. */
static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}
/* Returns 0 if the specified table isn't empty, otherwise 1. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
}
#else /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa,
			  int level)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= UPPER_ATTRS(XN);
	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The enable_mmu_elx() function sets the SCTLR_EL3.WXN bit,
		 * which makes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
			desc |= UPPER_ATTRS(XN);

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
/*
* Enumeration of actions that can be made when mapping table entries depending
* on the previous value in that entry and information about the region being
* mapped.
*/
typedef enum {

	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,

} action_t;
#if PLAT_XLAT_TABLES_DYNAMIC
/*
* Recursive function that writes to the translation tables and unmaps the
* specified region.
*/
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* Region covers all block */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 */

			assert(level < 3);

			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/*
* From the given arguments, it decides which action to take when mapping the
* specified region.
*/
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);

				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 *
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] =
				TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm,
					table_idx_va, subtable,
					XLAT_TABLE_ENTRIES, level + 1);
			if (end_va != table_idx_va +
					XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm,
					table_idx_va, subtable,
					XLAT_TABLE_ENTRIES, level + 1);
			if (end_va != table_idx_va +
					XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {

			assert(action == ACTION_NONE);

		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1;
}
void print_mmap(mmap_region_t *const mmap)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	tf_printf("mmap:\n");
	mmap_region_t *mm = mmap;

	while (mm->size) {
		tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			  (void *)mm->base_va, mm->base_pa,
			  mm->size, mm->attr);
		++mm;
	};
	tf_printf("\n");
#endif
}
/*
* Function that verifies that a region can be mapped.
* Returns:
* 0: Success, the mapping is allowed.
* EINVAL: Invalid values were used as arguments.
* ERANGE: The memory limits were surpassed.
* ENOMEM: There is not enough memory in the mmap array.
* EPERM: Region overlaps another one in an invalid way.
*/
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
				 uintptr_t base_va, size_t size,
				 unsigned int attr)
{
	mmap_region_t *mm = ctx->mmap;
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
			!IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = ctx->mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm->base_va) && (size == mm->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_end_pa =
						mm->base_pa + mm->size - 1;

			int separated_pa =
				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
			int separated_va =
				(end_va < mm->base_va) || (base_va > mm_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd                 |------|
	 * 4th                            |---|
	 * 5th                                   |---|
	 * 6th                            |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr;

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Nothing to do */
	if (!mm->size)
		return 0;

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr | MT_DYNAMIC);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr | MT_DYNAMIC;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
			memmove(mm_cursor, mm_cursor + 1,
				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm_cursor->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table entries,
			 * undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
					.base_pa = 0,
					.base_va = mm->base_va,
					.size = end_va - mm->base_va,
					.attr = 0
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to the memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, that aren't TLB cached.
		 */
		dsbishst();
	}

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
/*
* Removes the region with given base Virtual Address and size from the given
* context.
*
* Returns:
* 0: Success.
* EINVAL: Invalid values were used as arguments (region not found).
* EPERM: Tried to remove a static region.
*/
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0);

	while (mm->size) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if (!(mm->attr & MT_DYNAMIC))
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed) {
		ctx->max_va = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_va + mm->size - 1) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1;
			++mm;
		}
	}

	if (update_max_pa_needed) {
		ctx->max_pa = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1;
			++mm;
		}
	}

	return 0;
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(uint64_t desc)
{
	int mem_type_index = ATTR_INDEX_GET(desc);

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
	tf_printf(UPPER_ATTRS(XN) & desc ? "-XN" : "-EXEC");
}

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_ommited =
		"%s(%d invalid descriptors omitted)\n";
/*
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
static void xlat_tables_print_internal(const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_ommited,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(desc);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_ommited,
			  level_spacers[level], invalid_row_count - 1);
	}
}
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
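A usage sketch, assuming a verbose build (LOG_LEVEL >= LOG_LEVEL_VERBOSE) and a context named tf_xlat_ctx; the output shape in the comment is illustrative only:

/*
 * Dump the live tables. Expected shape (addresses illustrative):
 *   [LV1] VA:0x0 size:0x40000000
 *     [LV2] VA:0x4020000 PA:0x4020000 size:0x200000 MEM-RW-S-EXEC
 */
xlat_tables_print(&tf_xlat_ctx);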
void init_xlation_table(xlat_ctx_t *ctx)
{
	mmap_region_t *mm = ctx->mmap;

	/* All tables must be zeroed before mapping any region. */

	for (int i = 0; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	while (mm->size) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0,
				ctx->base_table, ctx->base_table_entries,
				ctx->base_level);

		if (end_va != mm->base_va + mm->size - 1) {
			ERROR("Not enough memory to map region:\n"
			      " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			      (void *)mm->base_va, mm->base_pa, mm->size,
			      mm->attr);
			panic();
		}

		mm++;
	}

	ctx->initialized = 1;
}
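The intended bring-up sequence, sketched under the assumption that the platform glue owns the context (named tf_xlat_ctx here) and passes no extra MMU flags:

init_xlation_table(&tf_xlat_ctx);		/* zero tables, map all regions */
init_xlat_tables_arch(tf_xlat_ctx.max_pa);	/* arch-specific sanity/setup */
enable_mmu_arch(0, tf_xlat_ctx.base_table);	/* flags assumed 0 for the example */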
lib/xlat_tables_v2/xlat_tables_private.h
0 → 100644
View file @
28ee754d
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_PRIVATE_H__
#define __XLAT_TABLES_PRIVATE_H__
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
/*
 * If the platform hasn't defined separate physical and virtual address
 * space sizes, default both to the deprecated ADDR_SPACE_SIZE.
 */
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
#endif
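For a platform that has migrated off the deprecated macro, the two sizes live in platform_def.h instead; a hypothetical fragment that satisfies the power-of-two asserts below:

/* platform_def.h (hypothetical values) */
#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)
#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)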
/* The virtual and physical address space sizes must be powers of two. */
CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
	assert_valid_virt_addr_space_size);
CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
	assert_valid_phy_addr_space_size);
/* Struct that holds all information about the translation tables. */
typedef struct {

	/*
	 * Max allowed Virtual and Physical Addresses.
	 */
	unsigned long long pa_max_address;
	uintptr_t va_max_address;

	/*
	 * Array of all memory regions stored in order of ascending end address
	 * and ascending size to simplify the code that allows overlapping
	 * regions. The list is terminated by the first entry with size == 0.
	 */
	mmap_region_t *mmap; /* mmap_num + 1 elements */
	int mmap_num;

	/*
	 * Array of finer-grain translation tables.
	 * For example, if the initial lookup level is 1 then this array would
	 * contain both level-2 and level-3 entries.
	 */
	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
	int tables_num;

	/*
	 * Keep track of how many regions are mapped in each table. The base
	 * table can't be unmapped, so there is no need to track it.
	 */
#if PLAT_XLAT_TABLES_DYNAMIC
	int *tables_mapped_regions;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

	int next_table;

	/*
	 * Base translation table. It doesn't need to have the same amount of
	 * entries as the ones used for other levels.
	 */
	uint64_t *base_table;
	int base_table_entries;

	unsigned long long max_pa;
	uintptr_t max_va;

	/* Level of the base translation table. */
	int base_level;

	/* Set to 1 when the translation tables are initialized. */
	int initialized;

} xlat_ctx_t;
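A minimal sketch of how a context could be backed by static storage; the array names, the NUM_BASE_LEVEL_ENTRIES count and the base-level value are assumptions for illustration, not the commit's actual allocation code:

static uint64_t base_table_mem[NUM_BASE_LEVEL_ENTRIES]	/* assumed macro */
	__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
static uint64_t tables_mem[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
	__aligned(XLAT_TABLE_SIZE);
static mmap_region_t mmap_mem[MAX_MMAP_REGIONS + 1];

static xlat_ctx_t ctx = {
	.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
	.va_max_address = PLAT_VIRT_ADDR_SPACE_SIZE - 1,
	.mmap = mmap_mem,
	.mmap_num = MAX_MMAP_REGIONS,
	.tables = tables_mem,
	.tables_num = MAX_XLAT_TABLES,
	.base_table = base_table_mem,
	.base_table_entries = NUM_BASE_LEVEL_ENTRIES,
	.base_level = 1,	/* assumed initial lookup level */
	.next_table = 0,
	.initialized = 0,
};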
#if PLAT_XLAT_TABLES_DYNAMIC
/*
* Shifts and masks to access fields of an mmap_attr_t
*/
/* Dynamic or static */
#define MT_DYN_SHIFT	30	/* 31 would cause undefined behaviours */
/*
* Memory mapping private attributes
*
* Private attributes not exposed in the mmap_attr_t enum.
*/
typedef enum {
	/*
	 * Regions mapped before the MMU is enabled can't be unmapped
	 * dynamically (they are static), while regions mapped with the MMU
	 * enabled can be unmapped. This behaviour can't be overridden.
	 *
	 * Static regions can overlap each other, dynamic regions can't.
	 */
	MT_STATIC	= 0 << MT_DYN_SHIFT,
	MT_DYNAMIC	= 1 << MT_DYN_SHIFT
} mmap_priv_attr_t;
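In practice the bit is ORed into the region attributes on the add path so the remove path can tell dynamic and static regions apart; a sketch of both halves:

/* Sketch: tag the region before it reaches the common mapping code. */
mm->attr |= MT_DYNAMIC;

/* Later, on removal (as in the .c file above): */
if (!(mm->attr & MT_DYNAMIC))
	return -EPERM;	/* static regions can't be removed */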
/*
* Function used to invalidate all levels of the translation walk for a given
* virtual address. It must be called for every translation table entry that is
* modified.
*/
void xlat_arch_tlbi_va(uintptr_t va);
/*
* This function has to be called at the end of any code that uses the function
* xlat_arch_tlbi_va().
*/
void xlat_arch_tlbi_va_sync(void);
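The two prototypes above are meant to be used as a pair: one invalidate per modified entry, one sync once the whole walk is done. A sketch, where base_va and size are assumed caller variables:

/* Sketch: invalidate each unmapped VA, then synchronise once. */
for (size_t off = 0; off < size; off += PAGE_SIZE)
	xlat_arch_tlbi_va(base_va + off);
xlat_arch_tlbi_va_sync();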
/* Add a dynamic region to the specified context. */
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
/* Remove a dynamic region from the specified context. */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				size_t size);
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/* Print VA, PA, size and attributes of all regions in the mmap array. */
void print_mmap(mmap_region_t *const mmap);
/*
* Print the current state of the translation tables by reading them from
* memory.
*/
void xlat_tables_print(xlat_ctx_t *ctx);
/*
* Initialize the translation tables by mapping all regions added to the
* specified context.
*/
void init_xlation_table(xlat_ctx_t *ctx);
/* Add a static region to the specified context. */
void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
/*
* Architecture-specific initialization code.
*/
/* Execute architecture-specific translation table initialization code. */
void init_xlat_tables_arch(unsigned long long max_pa);
/* Enable MMU and configure it to use the specified translation tables. */
void enable_mmu_arch(unsigned int flags, uint64_t *base_table);
/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
int is_mmu_enabled(void);
#endif /* __XLAT_TABLES_PRIVATE_H__ */
plat/arm/board/juno/platform.mk
View file @
28ee754d
...
...
@@ -69,6 +69,7 @@ BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
# Enable workarounds for selected Cortex-A57 errata.
ERRATA_A57_806969	:=	0
ERRATA_A57_813419	:=	1
ERRATA_A57_813420	:=	1
# Enable option to skip L1 data cache flush during the Cortex-A57 cluster
...
...
plat/arm/common/arm_bl1_setup.c
View file @
28ee754d
/*
 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
...
...
@@ -36,7 +36,7 @@
#include <plat_arm.h>
#include <sp805.h>
#include <utils.h>
#include <xlat_tables.h>
#include <xlat_tables_v2.h>
#include "../../../bl1/bl1_private.h"
/* Weak definitions may be overridden in specific ARM standard platform */
...
...
plat/arm/common/arm_common.c
View file @
28ee754d
/*
 * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
...
...
@@ -34,7 +34,7 @@
#include <mmio.h>
#include <plat_arm.h>
#include <platform_def.h>
#include <xlat_tables.h>
#include <xlat_tables_v2.h>
extern const mmap_region_t plat_arm_mmap[];
...
plat/arm/common/arm_common.mk
View file @
28ee754d
...
...
@@ -108,8 +108,9 @@ ifeq (${ARCH}, aarch64)
PLAT_INCLUDES		+=	-Iinclude/plat/arm/common/aarch64
endif

PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c	\
				lib/xlat_tables/${ARCH}/xlat_tables.c	\
include lib/xlat_tables_v2/xlat_tables.mk

PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}			\
				plat/arm/common/${ARCH}/arm_helpers.S	\
				plat/arm/common/arm_common.c		\
				plat/common/${ARCH}/plat_common.c
...
...
plat/common/aarch32/plat_common.c
View file @
28ee754d
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
...
...
@@ -29,7 +29,7 @@
*/
#include <platform.h>
#include <xlat_tables.h>
#include <xlat_mmu_helpers.h>
/*
* The following platform setup functions are weakly defined. They
...
...
plat/common/aarch64/plat_common.c
View file @
28ee754d
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
...
...
@@ -30,7 +30,7 @@
#include <assert.h>
#include <console.h>
#include <platform.h>
#include <xlat_tables.h>
#include <xlat_mmu_helpers.h>
/*
* The following platform setup functions are weakly defined. They
...
...