adam.huang / Arm Trusted Firmware / Commits

Commit 28ee754d
Authored Mar 16, 2017 by davidcunado-arm, committed by GitHub on Mar 16, 2017

Merge pull request #856 from antonio-nino-diaz-arm/an/dynamic-xlat

Introduce version 2 of the translation tables library

Parents: fa971fca, bf75a371
Changes: 37 files
lib/cpus/aarch64/cortex_a57.S
...
@@ -114,6 +114,21 @@ func check_errata_806969
 	b	cpu_rev_var_ls
 endfunc check_errata_806969
 
+	/* ---------------------------------------------------
+	 * Errata Workaround for Cortex A57 Errata #813419.
+	 * This applies only to revision r0p0 of Cortex A57.
+	 * ---------------------------------------------------
+	 */
+func check_errata_813419
+	/*
+	 * Even though this is only needed for revision r0p0, it
+	 * is always applied due to limitations of the current
+	 * errata framework.
+	 */
+	mov	x0, #ERRATA_APPLIES
+	ret
+endfunc check_errata_813419
+
 	/* ---------------------------------------------------
 	 * Errata Workaround for Cortex A57 Errata #813420.
 	 * This applies only to revision r0p0 of Cortex A57.
...
@@ -482,6 +497,7 @@ func cortex_a57_errata_report
 	 * checking functions of each errata.
 	 */
 	report_errata ERRATA_A57_806969, cortex_a57, 806969
+	report_errata ERRATA_A57_813419, cortex_a57, 813419
 	report_errata ERRATA_A57_813420, cortex_a57, 813420
 	report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
 		disable_ldnp_overread
...
lib/cpus/cpu-ops.mk
...
@@ -70,6 +70,10 @@ ERRATA_A53_836870 ?=0
 # only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_806969	?=0
 
+# Flag to apply erratum 813419 workaround during reset. This erratum applies
+# only to revision r0p0 of the Cortex A57 cpu.
+ERRATA_A57_813419	?=0
+
 # Flag to apply erratum 813420 workaround during reset. This erratum applies
 # only to revision r0p0 of the Cortex A57 cpu.
 ERRATA_A57_813420	?=0
...
@@ -106,6 +110,10 @@ $(eval $(call add_define,ERRATA_A53_836870))
 $(eval $(call assert_boolean,ERRATA_A57_806969))
 $(eval $(call add_define,ERRATA_A57_806969))
 
+# Process ERRATA_A57_813419 flag
+$(eval $(call assert_boolean,ERRATA_A57_813419))
+$(eval $(call add_define,ERRATA_A57_813419))
+
 # Process ERRATA_A57_813420 flag
 $(eval $(call assert_boolean,ERRATA_A57_813420))
 $(eval $(call add_define,ERRATA_A57_813420))
...
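For context: assert_boolean rejects any value other than 0 or 1, and add_define turns each flag into a compiler definition, so the flag can be tested at preprocessing time. The sketch below is illustrative only and not part of this patch; the fallback default simply mirrors the ?=0 above.

	/*
	 * Sketch only: add_define results in -DERRATA_A57_813419=<0|1> being
	 * passed to the compiler, so sources can gate the workaround on it.
	 */
	#ifndef ERRATA_A57_813419
	#define ERRATA_A57_813419	0	/* mirrors the ?=0 default in cpu-ops.mk */
	#endif

	#if ERRATA_A57_813419
		/* the erratum 813419 workaround is compiled in */
	#else
		/* the workaround is compiled out */
	#endif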
lib/xlat_tables/aarch64/xlat_tables.c
...
@@ -208,7 +208,7 @@ void init_xlat_tables(void)
 		/* into memory, the TLB invalidation is complete, */	\
 		/* and translation register writes are committed */	\
 		/* before enabling the MMU */				\
-		dsb();							\
+		dsbish();						\
 		isb();							\
 									\
 		sctlr = read_sctlr_el##_el();				\
...
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#if DEBUG
static unsigned long long xlat_arch_get_max_supported_pa(void)
{
	/* Physical address space size for long descriptor format. */
	return (1ull << 40) - 1ull;
}
#endif /* DEBUG*/

int is_mmu_enabled(void)
{
	return (read_sctlr() & SCTLR_M_BIT) != 0;
}

#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	tlbimvaais(TLBI_ADDR(va));
}

void xlat_arch_tlbi_va_sync(void)
{
	/* Invalidate all entries from branch predictors. */
	bpiallis();

	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= xlat_arch_get_max_supported_pa());
}

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
{
	u_register_t mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties as Inner
	 * & outer WBWA & shareable. Disable TTBR1.
	 */
	ttbcr = TTBCR_EAE_BIT |
		TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
		TTBCR_RGN0_INNER_WBA |
		(32 - __builtin_ctzl((uintptr_t) PLAT_VIRT_ADDR_SPACE_SIZE));
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uint64_t)(uintptr_t) base_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsb();
	isb();

	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}

void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
	enable_mmu_internal_secure(flags, base_table);
}
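The T0SZ term ORed into TTBCR above is just 32 minus the width of the configured virtual address space. A standalone sketch of that arithmetic follows; the 31-bit address space is a placeholder value, matching the worked example in the AArch32 header that comes next.

	#include <assert.h>

	int main(void)
	{
		/* Placeholder: a 31-bit virtual address space, i.e. 1UL << 31. */
		unsigned long virt_addr_space_size = 1UL << 31;

		/* Same expression as in enable_mmu_internal_secure(): T0SZ = 32 - width. */
		unsigned int t0sz = 32 - __builtin_ctzl(virt_addr_space_size);

		assert(t0sz == 1);
		return 0;
	}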
lib/xlat_tables_v2/aarch32/xlat_tables_arch.h
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_ARCH_H__
#define __XLAT_TABLES_ARCH_H__
#include <arch.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
/*
* In AArch32 state, the MMU only supports 4KB page granularity, which means
* that the first translation table level is either 1 or 2. Both of them are
* allowed to have block and table descriptors. See section G4.5.6 of the
* ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
*
* The define below specifies the first table level that allows block
* descriptors.
*/
#define MIN_LVL_BLOCK_DESC 1
/*
* Each platform can define the size of the virtual address space, which is
* defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
* the width of said address space. The value of TTBCR.TxSZ must be in the
* range 0 to 7 [1], which means that the virtual address space width must be
* in the range 32 to 25 bits.
*
* Here we calculate the initial lookup level from the value of
* PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
* address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
* narrower address spaces are not supported. As a result, level 3 cannot be
* used as initial lookup level with 4 KB granularity [1].
*
* For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
* 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
* G4-5 in the ARM ARM, the initial lookup level for an address space like that
* is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* [1] Section G4.6.5
*/
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
#endif /* __XLAT_TABLES_ARCH_H__ */
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
# define IMAGE_EL 3
#else
# define IMAGE_EL 1
#endif
static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if DEBUG
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101
};

unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() & ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
#endif /* DEBUG*/

int is_mmu_enabled(void)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
#endif
}

#if PLAT_XLAT_TABLES_DYNAMIC

void xlat_arch_tlbi_va(uintptr_t va)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	tlbivae3is(TLBI_ADDR(va));
#endif
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
	 * "Ordering and completion of TLB maintenance instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void init_xlat_tables_arch(unsigned long long max_pa)
{
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= xlat_arch_get_max_supported_pa());

	/*
	 * If dynamic allocation of new regions is enabled the code can't make
	 * assumptions about the max physical address because it could change
	 * after adding new regions. If this functionality is disabled it is
	 * safer to restrict the max physical address as much as possible.
	 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}
/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
*
* _el: Exception level at which the function will run
* _tcr_extra: Extra bits to set in the TCR register. This mask will
* be OR'ed with the default TCR value.
* _tlbi_fct: Function to invalidate the TLBs at the current
* exception level
******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_internal_el##_el(unsigned int flags,		\
					uint64_t *base_table)		\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable. */			\
		/* Set T0SZ to (64 - width of virtual address space) */\
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA |				\
			(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_table;				\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}
/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
#endif

void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
{
#if IMAGE_EL == 1
	assert(IS_IN_EL(1));
	enable_mmu_internal_el1(flags, base_table);
#elif IMAGE_EL == 3
	assert(IS_IN_EL(3));
	enable_mmu_internal_el3(flags, base_table);
#endif
}
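As a worked example of the PS-bits selection above: a platform whose largest physical address fits in 36 bits (a 64 GB space) has bits [35:32] set in max_pa, so calc_physical_addr_size_bits() returns TCR_PS_BITS_64GB, which the generated enable_mmu_internal_el3() then shifts into TCR_EL3 by TCR_EL3_PS_SHIFT. A standalone sketch of the same check follows; the ADDR_MASK_* and TCR_PS_BITS_* values are assumptions mirroring arch.h, not taken from this patch.

	#include <assert.h>

	/* Assumed values, mirroring arch.h. */
	#define ADDR_MASK_36_TO_39	0x000000F000000000ULL
	#define ADDR_MASK_32_TO_35	0x0000000F00000000ULL
	#define TCR_PS_BITS_64GB	0x1ULL
	#define TCR_PS_BITS_4GB		0x0ULL

	static unsigned long long ps_bits_sketch(unsigned long long max_addr)
	{
		if (max_addr & ADDR_MASK_36_TO_39)
			return 0x2ULL;	/* TCR_PS_BITS_1TB (assumed) */
		if (max_addr & ADDR_MASK_32_TO_35)
			return TCR_PS_BITS_64GB;
		return TCR_PS_BITS_4GB;
	}

	int main(void)
	{
		/* Largest address of a 36-bit (64 GB) physical address space. */
		unsigned long long max_pa = (1ULL << 36) - 1ULL;

		assert(ps_bits_sketch(max_pa) == TCR_PS_BITS_64GB);
		return 0;
	}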
lib/xlat_tables_v2/aarch64/xlat_tables_arch.h
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_ARCH_H__
#define __XLAT_TABLES_ARCH_H__
#include <arch.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
/*
* In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
* granularity. For 4KB granularity, a level 0 table descriptor doesn't support
* block translation. For 16KB, the same thing happens to levels 0 and 1. For
* 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
* Reference Manual (DDI 0487A.k) for more information.
*
* The define below specifies the first table level that allows block
* descriptors.
*/
#if PAGE_SIZE == (4*1024)
/* 4KB */
# define MIN_LVL_BLOCK_DESC 1
#else
/* 16KB or 64KB */
# define MIN_LVL_BLOCK_DESC 2
#endif
/*
* Each platform can define the size of the virtual address space, which is
* defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
* width of said address space. The value of TCR.TxSZ must be in the range 16
* to 39 [1], which means that the virtual address space width must be in the
* range 48 to 25 bits.
*
* Here we calculate the initial lookup level from the value of
* PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
* address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
* from 30 to 25. Wider or narrower address spaces are not supported. As a
* result, level 3 cannot be used as initial lookup level with 4 KB
* granularity. [2]
*
* For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
* 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
* D4-11 in the ARM ARM, the initial lookup level for an address space like
* that is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* [1] Page 1730: 'Input address size', 'For all translation stages'.
* [2] Section D4.2.5
*/
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 0
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
#endif /* __XLAT_TABLES_ARCH_H__ */
lib/xlat_tables_v2/xlat_tables.mk
0 → 100644
#
# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
				${ARCH}/xlat_tables_arch.c		\
				xlat_tables_common.c			\
				xlat_tables_internal.c)
lib/xlat_tables_v2/xlat_tables_common.c
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#ifdef AARCH32
# include "aarch32/xlat_tables_arch.h"
#else
# include "aarch64/xlat_tables_arch.h"
#endif
#include "xlat_tables_private.h"
/*
 * Private variables used by the TF
 */
static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];

static uint64_t tf_xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");

static uint64_t tf_base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];

#if PLAT_XLAT_TABLES_DYNAMIC
static int xlat_tables_mapped_regions[MAX_XLAT_TABLES];
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

xlat_ctx_t tf_xlat_ctx = {

	.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
	.va_max_address = PLAT_VIRT_ADDR_SPACE_SIZE - 1,

	.mmap = tf_mmap,
	.mmap_num = MAX_MMAP_REGIONS,

	.tables = tf_xlat_tables,
	.tables_num = MAX_XLAT_TABLES,
#if PLAT_XLAT_TABLES_DYNAMIC
	.tables_mapped_regions = xlat_tables_mapped_regions,
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

	.base_table = tf_base_xlat_table,
	.base_table_entries = NUM_BASE_LEVEL_ENTRIES,

	.max_pa = 0,
	.max_va = 0,

	.next_table = 0,

	.base_level = XLAT_TABLE_LEVEL_BASE,

	.initialized = 0
};

void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
			size_t size, unsigned int attr)
{
	mmap_region_t mm = {
		.base_va = base_va,
		.base_pa = base_pa,
		.size = size,
		.attr = attr,
	};
	mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)&mm);
}

void mmap_add(const mmap_region_t *mm)
{
	while (mm->size) {
		mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)mm);
		mm++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
			size_t size, unsigned int attr)
{
	mmap_region_t mm = {
		.base_va = base_va,
		.base_pa = base_pa,
		.size = size,
		.attr = attr,
	};
	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

void init_xlat_tables(void)
{
	assert(!is_mmu_enabled());
	assert(!tf_xlat_ctx.initialized);
	print_mmap(tf_xlat_ctx.mmap);
	init_xlation_table(&tf_xlat_ctx);
	xlat_tables_print(&tf_xlat_ctx);

	assert(tf_xlat_ctx.max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(tf_xlat_ctx.max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);

	init_xlat_tables_arch(tf_xlat_ctx.max_pa);
}

#ifdef AARCH32

void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

void enable_mmu_el3(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

#endif /* AARCH32 */
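For reference, a minimal sketch of how a BL image could drive the public entry points defined above: add the static regions, initialise the tables, enable the MMU, and (when PLAT_XLAT_TABLES_DYNAMIC is set) map and unmap regions at runtime. The MT_* attribute names and PAGE_SIZE are assumed to come from xlat_tables_v2.h and the platform headers, and all addresses and sizes are placeholders.

	#include <xlat_tables_v2.h>

	/* Placeholder addresses/sizes for illustration only. */
	#define SKETCH_DRAM_BASE	0x80000000ULL
	#define SKETCH_DRAM_SIZE	0x10000000U
	#define SKETCH_DEV_BASE		0x1c000000ULL
	#define SKETCH_DEV_SIZE		0x00100000U

	void plat_setup_xlat_sketch(void)
	{
		/* Static regions must be added before the tables are initialised. */
		mmap_add_region(SKETCH_DRAM_BASE, SKETCH_DRAM_BASE, SKETCH_DRAM_SIZE,
				MT_MEMORY | MT_RW | MT_SECURE);
		mmap_add_region(SKETCH_DEV_BASE, SKETCH_DEV_BASE, SKETCH_DEV_SIZE,
				MT_DEVICE | MT_RW | MT_SECURE);

		init_xlat_tables();

		/* enable_mmu_el1() or enable_mmu_secure() on other images/architectures. */
		enable_mmu_el3(0);

	#if PLAT_XLAT_TABLES_DYNAMIC
		/* With the MMU enabled, regions can now be mapped and unmapped. */
		if (mmap_add_dynamic_region(SKETCH_DRAM_BASE + SKETCH_DRAM_SIZE,
					SKETCH_DRAM_BASE + SKETCH_DRAM_SIZE,
					PAGE_SIZE, MT_MEMORY | MT_RW | MT_SECURE) == 0)
			mmap_remove_dynamic_region(SKETCH_DRAM_BASE + SKETCH_DRAM_SIZE,
					PAGE_SIZE);
	#endif
	}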
lib/xlat_tables_v2/xlat_tables_internal.c
0 → 100644
This diff is collapsed.
lib/xlat_tables_v2/xlat_tables_private.h
0 → 100644
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_PRIVATE_H__
#define __XLAT_TABLES_PRIVATE_H__
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
/*
* If the platform hasn't defined a physical and a virtual address space size
* default to ADDR_SPACE_SIZE.
*/
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
#endif
/* The virtual and physical address space sizes must be powers of two. */
/* The virtual and physical address space sizes must be powers of two. */
CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
	assert_valid_virt_addr_space_size);
CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
	assert_valid_phy_addr_space_size);

/* Struct that holds all information about the translation tables. */
typedef struct {

	/*
	 * Max allowed Virtual and Physical Addresses.
	 */
	unsigned long long pa_max_address;
	uintptr_t va_max_address;

	/*
	 * Array of all memory regions stored in order of ascending end address
	 * and ascending size to simplify the code that allows overlapping
	 * regions. The list is terminated by the first entry with size == 0.
	 */
	mmap_region_t *mmap; /* mmap_num + 1 elements */
	int mmap_num;

	/*
	 * Array of finer-grain translation tables.
	 * For example, if the initial lookup level is 1 then this array would
	 * contain both level-2 and level-3 entries.
	 */
	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
	int tables_num;
	/*
	 * Keep track of how many regions are mapped in each table. The base
	 * table can't be unmapped so it isn't needed to keep track of it.
	 */
#if PLAT_XLAT_TABLES_DYNAMIC
	int *tables_mapped_regions;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

	int next_table;

	/*
	 * Base translation table. It doesn't need to have the same amount of
	 * entries as the ones used for other levels.
	 */
	uint64_t *base_table;
	int base_table_entries;

	unsigned long long max_pa;
	uintptr_t max_va;

	/* Level of the base translation table. */
	int base_level;

	/* Set to 1 when the translation tables are initialized. */
	int initialized;

} xlat_ctx_t;

#if PLAT_XLAT_TABLES_DYNAMIC
/*
 * Shifts and masks to access fields of an mmap_attr_t
 */
/* Dynamic or static */
#define MT_DYN_SHIFT	30 /* 31 would cause undefined behaviours */

/*
 * Memory mapping private attributes
 *
 * Private attributes not exposed in the mmap_attr_t enum.
 */
typedef enum {
	/*
	 * Regions mapped before the MMU can't be unmapped dynamically (they are
	 * static) and regions mapped with MMU enabled can be unmapped. This
	 * behaviour can't be overridden.
	 *
	 * Static regions can overlap each other, dynamic regions can't.
	 */
	MT_STATIC	= 0 << MT_DYN_SHIFT,
	MT_DYNAMIC	= 1 << MT_DYN_SHIFT
} mmap_priv_attr_t;

/*
 * Function used to invalidate all levels of the translation walk for a given
 * virtual address. It must be called for every translation table entry that is
 * modified.
 */
void xlat_arch_tlbi_va(uintptr_t va);

/*
 * This function has to be called at the end of any code that uses the function
 * xlat_arch_tlbi_va().
 */
void xlat_arch_tlbi_va_sync(void);

/* Add a dynamic region to the specified context. */
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);

/* Remove a dynamic region from the specified context. */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				size_t size);

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/* Print VA, PA, size and attributes of all regions in the mmap array. */
void print_mmap(mmap_region_t *const mmap);

/*
 * Print the current state of the translation tables by reading them from
 * memory.
 */
void xlat_tables_print(xlat_ctx_t *ctx);

/*
 * Initialize the translation tables by mapping all regions added to the
 * specified context.
 */
void init_xlation_table(xlat_ctx_t *ctx);

/* Add a static region to the specified context. */
void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);

/*
 * Architecture-specific initialization code.
 */

/* Execute architecture-specific translation table initialization code. */
void init_xlat_tables_arch(unsigned long long max_pa);

/* Enable MMU and configure it to use the specified translation tables. */
void enable_mmu_arch(unsigned int flags, uint64_t *base_table);

/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
int is_mmu_enabled(void);

#endif /* __XLAT_TABLES_PRIVATE_H__ */
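The comments above pin down the required call pattern for code that modifies live translation tables: invalidate the walk for every entry that is changed with xlat_arch_tlbi_va(), then issue a single xlat_arch_tlbi_va_sync() at the end. A minimal sketch of that pattern follows; the descriptor-clearing loop and its parameters are hypothetical, and only the two xlat_arch_* calls come from this header.

	#include <stddef.h>
	#include <stdint.h>
	#include "xlat_tables_private.h"

	/* Sketch: unmap a run of already-mapped pages and flush their stale walks. */
	static void unmap_pages_sketch(uint64_t *table, uintptr_t base_va,
					size_t pages, size_t page_size)
	{
		size_t i;

		for (i = 0; i < pages; i++) {
			uintptr_t va = base_va + i * page_size;

			table[i] = 0;		/* hypothetical: clear the descriptor */
			xlat_arch_tlbi_va(va);	/* invalidate the walk for this VA */
		}

		/* One synchronisation after the whole batch, as required above. */
		xlat_arch_tlbi_va_sync();
	}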
plat/arm/board/juno/platform.mk
...
@@ -69,6 +69,7 @@ BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
 # Enable workarounds for selected Cortex-A57 erratas.
 ERRATA_A57_806969	:=	0
+ERRATA_A57_813419	:=	1
 ERRATA_A57_813420	:=	1
 
 # Enable option to skip L1 data cache flush during the Cortex-A57 cluster
...
plat/arm/common/arm_bl1_setup.c
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
...
@@ -36,7 +36,7 @@
 #include <plat_arm.h>
 #include <sp805.h>
 #include <utils.h>
-#include <xlat_tables.h>
+#include <xlat_tables_v2.h>
 #include "../../../bl1/bl1_private.h"
 
 /* Weak definitions may be overridden in specific ARM standard platform */
...
plat/arm/common/arm_common.c
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
...
@@ -34,7 +34,7 @@
 #include <mmio.h>
 #include <plat_arm.h>
 #include <platform_def.h>
-#include <xlat_tables.h>
+#include <xlat_tables_v2.h>
 
 extern const mmap_region_t plat_arm_mmap[];
...
plat/arm/common/arm_common.mk
...
@@ -108,8 +108,9 @@ ifeq (${ARCH}, aarch64)
 PLAT_INCLUDES		+=	-Iinclude/plat/arm/common/aarch64
 endif
 
-PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c	\
-				lib/xlat_tables/${ARCH}/xlat_tables.c	\
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_BL_COMMON_SOURCES	+=	${XLAT_TABLES_LIB_SRCS}			\
 				plat/arm/common/${ARCH}/arm_helpers.S	\
 				plat/arm/common/arm_common.c		\
 				plat/common/${ARCH}/plat_common.c
...
plat/common/aarch32/plat_common.c
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
...
@@ -29,7 +29,7 @@
  */
 #include <platform.h>
-#include <xlat_tables.h>
+#include <xlat_mmu_helpers.h>
 
 /*
  * The following platform setup functions are weakly defined. They
...
plat/common/aarch64/plat_common.c
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
...
@@ -30,7 +30,7 @@
 #include <assert.h>
 #include <console.h>
 #include <platform.h>
-#include <xlat_tables.h>
+#include <xlat_mmu_helpers.h>
 
 /*
  * The following platform setup functions are weakly defined. They
...