Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
adam.huang
Arm Trusted Firmware
Commits
2ee596c4
Unverified
Commit
2ee596c4
authored
Jul 30, 2018
by
Dimitris Papastamos
Committed by
GitHub
Jul 30, 2018
Browse files
Merge pull request #1493 from antonio-nino-diaz-arm/an/xlat-misra
Fix MISRA defects in xlat tables lib and SP805 driver
parents
eef90a77
354305c3
Changes
21
Expand all
Hide whitespace changes
Inline
Side-by-side
drivers/arm/sp805/sp805.c
View file @
2ee596c4
/*
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015
-2018
, ARM Limited and Contributors. All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
...
@@ -10,17 +10,17 @@
...
@@ -10,17 +10,17 @@
/* Inline register access functions */
/* Inline register access functions */
static
inline
void
sp805_write_wdog_load
(
uintptr_t
base
,
u
nsigned
long
value
)
static
inline
void
sp805_write_wdog_load
(
uintptr_t
base
,
u
int32_t
value
)
{
{
mmio_write_32
(
base
+
SP805_WDOG_LOAD_OFF
,
value
);
mmio_write_32
(
base
+
SP805_WDOG_LOAD_OFF
,
value
);
}
}
static
inline
void
sp805_write_wdog_ctrl
(
uintptr_t
base
,
u
nsigned
long
value
)
static
inline
void
sp805_write_wdog_ctrl
(
uintptr_t
base
,
u
int32_t
value
)
{
{
mmio_write_32
(
base
+
SP805_WDOG_CTR_OFF
,
value
);
mmio_write_32
(
base
+
SP805_WDOG_CTR_OFF
,
value
);
}
}
static
inline
void
sp805_write_wdog_lock
(
uintptr_t
base
,
u
nsigned
long
value
)
static
inline
void
sp805_write_wdog_lock
(
uintptr_t
base
,
u
int32_t
value
)
{
{
mmio_write_32
(
base
+
SP805_WDOG_LOCK_OFF
,
value
);
mmio_write_32
(
base
+
SP805_WDOG_LOCK_OFF
,
value
);
}
}
...
@@ -28,23 +28,23 @@ static inline void sp805_write_wdog_lock(uintptr_t base, unsigned long value)
...
@@ -28,23 +28,23 @@ static inline void sp805_write_wdog_lock(uintptr_t base, unsigned long value)
/* Public API implementation */
/* Public API implementation */
void
sp805_start
(
uintptr_t
base
,
unsigned
long
ticks
)
void
sp805_start
(
uintptr_t
base
,
unsigned
int
ticks
)
{
{
sp805_write_wdog_load
(
base
,
ticks
);
sp805_write_wdog_load
(
base
,
ticks
);
sp805_write_wdog_ctrl
(
base
,
SP805_CTR_RESEN
|
SP805_CTR_INTEN
);
sp805_write_wdog_ctrl
(
base
,
SP805_CTR_RESEN
|
SP805_CTR_INTEN
);
/* Lock registers access */
/* Lock registers access */
sp805_write_wdog_lock
(
base
,
0
);
sp805_write_wdog_lock
(
base
,
0
U
);
}
}
void
sp805_stop
(
uintptr_t
base
)
void
sp805_stop
(
uintptr_t
base
)
{
{
sp805_write_wdog_lock
(
base
,
WDOG_UNLOCK_KEY
);
sp805_write_wdog_lock
(
base
,
WDOG_UNLOCK_KEY
);
sp805_write_wdog_ctrl
(
base
,
0
);
sp805_write_wdog_ctrl
(
base
,
0
U
);
}
}
void
sp805_refresh
(
uintptr_t
base
,
unsigned
long
ticks
)
void
sp805_refresh
(
uintptr_t
base
,
unsigned
int
ticks
)
{
{
sp805_write_wdog_lock
(
base
,
WDOG_UNLOCK_KEY
);
sp805_write_wdog_lock
(
base
,
WDOG_UNLOCK_KEY
);
sp805_write_wdog_load
(
base
,
ticks
);
sp805_write_wdog_load
(
base
,
ticks
);
sp805_write_wdog_lock
(
base
,
0
);
sp805_write_wdog_lock
(
base
,
0
U
);
}
}
include/drivers/arm/sp805.h
View file @
2ee596c4
/*
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015
-2018
, ARM Limited and Contributors. All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef __SP805_H__
#ifndef SP805_H
#define __SP805_H__
#define SP805_H
#include <utils_def.h>
/* SP805 register offset */
/* SP805 register offset */
#define SP805_WDOG_LOAD_OFF 0x000
#define SP805_WDOG_LOAD_OFF
UL(
0x000
)
#define SP805_WDOG_CTR_OFF 0x008
#define SP805_WDOG_CTR_OFF
UL(
0x008
)
#define SP805_WDOG_LOCK_OFF 0xc00
#define SP805_WDOG_LOCK_OFF
UL(
0xc00
)
/* Magic word to unlock the wd registers */
/* Magic word to unlock the wd registers */
#define WDOG_UNLOCK_KEY 0x1ACCE551
#define WDOG_UNLOCK_KEY
U(
0x1ACCE551
)
/* Register field definitions */
/* Register field definitions */
#define SP805_CTR_RESEN (
1
<< 1)
#define SP805_CTR_RESEN (
U(1)
<< 1)
#define SP805_CTR_INTEN (
1
<< 0)
#define SP805_CTR_INTEN (
U(1)
<< 0)
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLY__
...
@@ -25,10 +27,10 @@
...
@@ -25,10 +27,10 @@
/* Public high level API */
/* Public high level API */
void
sp805_start
(
uintptr_t
base
,
unsigned
long
ticks
);
void
sp805_start
(
uintptr_t
base
,
unsigned
int
ticks
);
void
sp805_stop
(
uintptr_t
base
);
void
sp805_stop
(
uintptr_t
base
);
void
sp805_refresh
(
uintptr_t
base
,
unsigned
long
ticks
);
void
sp805_refresh
(
uintptr_t
base
,
unsigned
int
ticks
);
#endif
/* __ASSEMBLY__ */
#endif
/* __ASSEMBLY__ */
#endif
/*
__
SP805_H
__
*/
#endif
/* SP805_H */
include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
View file @
2ee596c4
...
@@ -4,8 +4,8 @@
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_AARCH32_H
__
#ifndef XLAT_TABLES_AARCH32_H
#define
__
XLAT_TABLES_AARCH32_H
__
#define XLAT_TABLES_AARCH32_H
#include <arch.h>
#include <arch.h>
#include <utils_def.h>
#include <utils_def.h>
...
@@ -24,7 +24,7 @@
...
@@ -24,7 +24,7 @@
* The define below specifies the first table level that allows block
* The define below specifies the first table level that allows block
* descriptors.
* descriptors.
*/
*/
#if PAGE_SIZE !=
(4 * 1024)
#if PAGE_SIZE !=
PAGE_SIZE_4KB
#error "Invalid granule size. AArch32 supports 4KB pages only."
#error "Invalid granule size. AArch32 supports 4KB pages only."
#endif
#endif
...
@@ -43,8 +43,8 @@
...
@@ -43,8 +43,8 @@
* [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information, Section G4.6.5
* information, Section G4.6.5
*/
*/
#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (32 - TTBCR_TxSZ_MAX))
#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) <<
(U
(32
)
- TTBCR_TxSZ_MAX))
#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (32 - TTBCR_TxSZ_MIN))
#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) <<
(U
(32
)
- TTBCR_TxSZ_MIN))
/*
/*
* Here we calculate the initial lookup level from the value of the given
* Here we calculate the initial lookup level from the value of the given
...
@@ -66,7 +66,8 @@
...
@@ -66,7 +66,8 @@
* valid. Therefore, the caller is expected to check it is the case using the
* valid. Therefore, the caller is expected to check it is the case using the
* CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
* CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
*/
*/
#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size) \
#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
(((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? 1 : 2)
(((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? \
U(1) : U(2))
#endif
/*
__
XLAT_TABLES_AARCH32_H
__
*/
#endif
/* XLAT_TABLES_AARCH32_H */
include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
View file @
2ee596c4
...
@@ -4,8 +4,8 @@
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_AARCH64_H
__
#ifndef XLAT_TABLES_AARCH64_H
#define
__
XLAT_TABLES_AARCH64_H
__
#define XLAT_TABLES_AARCH64_H
#include <arch.h>
#include <arch.h>
#include <utils_def.h>
#include <utils_def.h>
...
@@ -30,9 +30,9 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
...
@@ -30,9 +30,9 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
* The define below specifies the first table level that allows block
* The define below specifies the first table level that allows block
* descriptors.
* descriptors.
*/
*/
#if PAGE_SIZE ==
(4 * 1024)
#if PAGE_SIZE ==
PAGE_SIZE_4KB
# define MIN_LVL_BLOCK_DESC U(1)
# define MIN_LVL_BLOCK_DESC U(1)
#elif PAGE_SIZE ==
(16 * 1024
) || PAGE_SIZE ==
(64 * 1024
)
#elif
(
PAGE_SIZE ==
PAGE_SIZE_16KB
) ||
(
PAGE_SIZE ==
PAGE_SIZE_64KB
)
# define MIN_LVL_BLOCK_DESC U(2)
# define MIN_LVL_BLOCK_DESC U(2)
#endif
#endif
...
@@ -50,8 +50,8 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
...
@@ -50,8 +50,8 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
* information:
* information:
* Page 1730: 'Input address size', 'For all translation stages'.
* Page 1730: 'Input address size', 'For all translation stages'.
*/
*/
#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (64 - TCR_TxSZ_MAX))
#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) <<
(U
(64
)
- TCR_TxSZ_MAX))
#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (64 - TCR_TxSZ_MIN))
#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) <<
(U
(64
)
- TCR_TxSZ_MIN))
/*
/*
* Here we calculate the initial lookup level from the value of the given
* Here we calculate the initial lookup level from the value of the given
...
@@ -74,10 +74,10 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
...
@@ -74,10 +74,10 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
* valid. Therefore, the caller is expected to check it is the case using the
* valid. Therefore, the caller is expected to check it is the case using the
* CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
* CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
*/
*/
#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_s
ize)
\
#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_s
z)
\
(((_virt_addr_space_s
ize
) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))
\
(((_virt_addr_space_s
z
) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT)) \
? 0
\
? 0
U
\
: (((_virt_addr_space_s
ize
) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
: (((_virt_addr_space_s
z
) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
? 1 : 2))
? 1
U
: 2
U
))
#endif
/*
__
XLAT_TABLES_AARCH64_H
__
*/
#endif
/* XLAT_TABLES_AARCH64_H */
include/lib/xlat_tables/xlat_mmu_helpers.h
View file @
2ee596c4
...
@@ -4,8 +4,8 @@
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_MMU_HELPERS_H
__
#ifndef XLAT_MMU_HELPERS_H
#define
__
XLAT_MMU_HELPERS_H
__
#define XLAT_MMU_HELPERS_H
/*
/*
* The following flags are passed to enable_mmu_xxx() to override the default
* The following flags are passed to enable_mmu_xxx() to override the default
...
@@ -52,6 +52,7 @@
...
@@ -52,6 +52,7 @@
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLY__
#include <stdint.h>
#include <sys/types.h>
#include <sys/types.h>
/*
/*
...
@@ -82,4 +83,4 @@ size_t xlat_arch_get_max_supported_granule_size(void);
...
@@ -82,4 +83,4 @@ size_t xlat_arch_get_max_supported_granule_size(void);
#endif
/* __ASSEMBLY__ */
#endif
/* __ASSEMBLY__ */
#endif
/*
__
XLAT_MMU_HELPERS_H
__
*/
#endif
/* XLAT_MMU_HELPERS_H */
include/lib/xlat_tables/xlat_tables.h
View file @
2ee596c4
/*
/*
* Copyright (c) 2014-201
7
, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-201
8
, ARM Limited and Contributors. All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_H
__
#ifndef XLAT_TABLES_H
#define
__
XLAT_TABLES_H
__
#define XLAT_TABLES_H
#include <xlat_tables_defs.h>
#include <xlat_tables_defs.h>
...
@@ -92,4 +92,4 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
...
@@ -92,4 +92,4 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
void
mmap_add
(
const
mmap_region_t
*
mm
);
void
mmap_add
(
const
mmap_region_t
*
mm
);
#endif
/*__ASSEMBLY__*/
#endif
/*__ASSEMBLY__*/
#endif
/*
__
XLAT_TABLES_H
__
*/
#endif
/* XLAT_TABLES_H */
include/lib/xlat_tables/xlat_tables_arch.h
View file @
2ee596c4
/*
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017
-2018
, ARM Limited and Contributors. All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_ARCH_H
__
#ifndef XLAT_TABLES_ARCH_H
#define
__
XLAT_TABLES_ARCH_H
__
#define XLAT_TABLES_ARCH_H
#ifdef AARCH32
#ifdef AARCH32
#include "aarch32/xlat_tables_aarch32.h"
#include "aarch32/xlat_tables_aarch32.h"
...
@@ -21,8 +21,8 @@
...
@@ -21,8 +21,8 @@
* limits. Not that these limits are different for AArch32 and AArch64.
* limits. Not that these limits are different for AArch32 and AArch64.
*/
*/
#define CHECK_VIRT_ADDR_SPACE_SIZE(size) \
#define CHECK_VIRT_ADDR_SPACE_SIZE(size) \
(((unsigned long long)(size) >= MIN_VIRT_ADDR_SPACE_SIZE) &&
\
(((unsigned long long)(size) >= MIN_VIRT_ADDR_SPACE_SIZE) && \
((unsigned long long)(size) <= MAX_VIRT_ADDR_SPACE_SIZE) &&
\
((unsigned long long)(size) <= MAX_VIRT_ADDR_SPACE_SIZE) && \
IS_POWER_OF_TWO(size))
IS_POWER_OF_TWO(size))
/*
/*
...
@@ -40,4 +40,4 @@
...
@@ -40,4 +40,4 @@
((addr_space_size) >> \
((addr_space_size) >> \
XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
#endif
/*
__
XLAT_TABLES_ARCH_H
__
*/
#endif
/* XLAT_TABLES_ARCH_H */
include/lib/xlat_tables/xlat_tables_defs.h
View file @
2ee596c4
...
@@ -4,8 +4,8 @@
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_DEFS_H
__
#ifndef XLAT_TABLES_DEFS_H
#define
__
XLAT_TABLES_DEFS_H
__
#define XLAT_TABLES_DEFS_H
#include <arch.h>
#include <arch.h>
#include <utils_def.h>
#include <utils_def.h>
...
@@ -24,6 +24,10 @@
...
@@ -24,6 +24,10 @@
#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
#define PAGE_SIZE_4KB U(4096)
#define PAGE_SIZE_16KB U(16384)
#define PAGE_SIZE_64KB U(65536)
#define INVALID_DESC U(0x0)
#define INVALID_DESC U(0x0)
/*
/*
* A block descriptor points to a region of memory bigger than the granule size
* A block descriptor points to a region of memory bigger than the granule size
...
@@ -66,8 +70,8 @@
...
@@ -66,8 +70,8 @@
*/
*/
#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
#define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
#define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
#define PAGE_SIZE_MASK (PAGE_SIZE -
U(
1)
)
#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) ==
U(
0)
)
#define XLAT_ENTRY_SIZE_SHIFT U(3)
/* Each MMU table entry is 8 bytes (1 << 3) */
#define XLAT_ENTRY_SIZE_SHIFT U(3)
/* Each MMU table entry is 8 bytes (1 << 3) */
#define XLAT_ENTRY_SIZE (U(1) << XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_ENTRY_SIZE (U(1) << XLAT_ENTRY_SIZE_SHIFT)
...
@@ -80,7 +84,7 @@
...
@@ -80,7 +84,7 @@
/* Values for number of entries in each MMU translation table */
/* Values for number of entries in each MMU translation table */
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES (U(1) << XLAT_TABLE_ENTRIES_SHIFT)
#define XLAT_TABLE_ENTRIES (U(1) << XLAT_TABLE_ENTRIES_SHIFT)
#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES -
U(
1)
)
/* Values to convert a memory address to an index into a translation table */
/* Values to convert a memory address to an index into a translation table */
#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
...
@@ -90,9 +94,9 @@
...
@@ -90,9 +94,9 @@
#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
#define XLAT_BLOCK_SIZE(level) (
(u_register_t)1
<< XLAT_ADDR_SHIFT(level))
#define XLAT_BLOCK_SIZE(level) (
UL(1)
<< XLAT_ADDR_SHIFT(level))
/* Mask to get the bits used to index inside a block of a certain level */
/* Mask to get the bits used to index inside a block of a certain level */
#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - 1)
#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) -
UL(
1)
)
/* Mask to get the address bits common to a block of a certain table level*/
/* Mask to get the address bits common to a block of a certain table level*/
#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
/*
/*
...
@@ -111,13 +115,13 @@
...
@@ -111,13 +115,13 @@
* when stage 1 translations can only support one VA range.
* when stage 1 translations can only support one VA range.
*/
*/
#define AP2_SHIFT U(0x7)
#define AP2_SHIFT U(0x7)
#define AP2_RO U(0x1)
#define AP2_RO U
LL
(0x1)
#define AP2_RW U(0x0)
#define AP2_RW U
LL
(0x0)
#define AP1_SHIFT U(0x6)
#define AP1_SHIFT U(0x6)
#define AP1_ACCESS_UNPRIVILEGED U(0x1)
#define AP1_ACCESS_UNPRIVILEGED U
LL
(0x1)
#define AP1_NO_ACCESS_UNPRIVILEGED U(0x0)
#define AP1_NO_ACCESS_UNPRIVILEGED U
LL
(0x0)
#define AP1_RES1 U(0x1)
#define AP1_RES1 U
LL
(0x1)
/*
/*
* The following definitions must all be passed to the LOWER_ATTRS() macro to
* The following definitions must all be passed to the LOWER_ATTRS() macro to
...
@@ -129,9 +133,9 @@
...
@@ -129,9 +133,9 @@
#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
#define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4)
#define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4)
#define NS (U(0x1) << 3)
#define NS (U(0x1) << 3)
#define ATTR_NON_CACHEABLE_INDEX U(0x2)
#define ATTR_NON_CACHEABLE_INDEX U
LL
(0x2)
#define ATTR_DEVICE_INDEX U(0x1)
#define ATTR_DEVICE_INDEX U
LL
(0x1)
#define ATTR_IWBWA_OWBWA_NTR_INDEX U(0x0)
#define ATTR_IWBWA_OWBWA_NTR_INDEX U
LL
(0x0)
#define LOWER_ATTRS(x) (((x) & U(0xfff)) << 2)
#define LOWER_ATTRS(x) (((x) & U(0xfff)) << 2)
/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
...
...
include/lib/xlat_tables/xlat_tables_v2.h
View file @
2ee596c4
...
@@ -4,8 +4,8 @@
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_V2_H
__
#ifndef XLAT_TABLES_V2_H
#define
__
XLAT_TABLES_V2_H
__
#define XLAT_TABLES_V2_H
#include <xlat_tables_defs.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2_helpers.h>
#include <xlat_tables_v2_helpers.h>
...
@@ -27,7 +27,7 @@
...
@@ -27,7 +27,7 @@
/* Helper macro to define an mmap_region_t. */
/* Helper macro to define an mmap_region_t. */
#define MAP_REGION(_pa, _va, _sz, _attr) \
#define MAP_REGION(_pa, _va, _sz, _attr) \
_
MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
/* Helper macro to define an mmap_region_t with an identity mapping. */
/* Helper macro to define an mmap_region_t with an identity mapping. */
#define MAP_REGION_FLAT(_adr, _sz, _attr) \
#define MAP_REGION_FLAT(_adr, _sz, _attr) \
...
@@ -44,7 +44,7 @@
...
@@ -44,7 +44,7 @@
* equivalent to the MAP_REGION() macro.
* equivalent to the MAP_REGION() macro.
*/
*/
#define MAP_REGION2(_pa, _va, _sz, _attr, _gr) \
#define MAP_REGION2(_pa, _va, _sz, _attr, _gr) \
_
MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
/*
/*
* Shifts and masks to access fields of an mmap attribute
* Shifts and masks to access fields of an mmap attribute
...
@@ -163,7 +163,7 @@ typedef struct xlat_ctx xlat_ctx_t;
...
@@ -163,7 +163,7 @@ typedef struct xlat_ctx xlat_ctx_t;
*/
*/
#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size) \
_virt_addr_space_size, _phy_addr_space_size) \
_
REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
(_xlat_tables_count), \
(_xlat_tables_count), \
(_virt_addr_space_size), \
(_virt_addr_space_size), \
(_phy_addr_space_size), \
(_phy_addr_space_size), \
...
@@ -183,7 +183,7 @@ typedef struct xlat_ctx xlat_ctx_t;
...
@@ -183,7 +183,7 @@ typedef struct xlat_ctx xlat_ctx_t;
#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size, \
_virt_addr_space_size, _phy_addr_space_size, \
_xlat_regime, _section_name) \
_xlat_regime, _section_name) \
_
REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
(_xlat_tables_count), \
(_xlat_tables_count), \
(_virt_addr_space_size), \
(_virt_addr_space_size), \
(_phy_addr_space_size), \
(_phy_addr_space_size), \
...
@@ -296,7 +296,7 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
...
@@ -296,7 +296,7 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
* translation tables are not modified by any other code while this function is
* translation tables are not modified by any other code while this function is
* executing.
* executing.
*/
*/
int
change_mem_attributes
(
xlat_ctx_t
*
ctx
,
uintptr_t
base_va
,
size_t
size
,
int
change_mem_attributes
(
const
xlat_ctx_t
*
ctx
,
uintptr_t
base_va
,
size_t
size
,
uint32_t
attr
);
uint32_t
attr
);
/*
/*
...
@@ -318,4 +318,4 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
...
@@ -318,4 +318,4 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
uint32_t
*
attributes
);
uint32_t
*
attributes
);
#endif
/*__ASSEMBLY__*/
#endif
/*__ASSEMBLY__*/
#endif
/*
__
XLAT_TABLES_V2_H
__
*/
#endif
/* XLAT_TABLES_V2_H */
include/lib/xlat_tables/xlat_tables_v2_helpers.h
View file @
2ee596c4
...
@@ -9,10 +9,10 @@
...
@@ -9,10 +9,10 @@
* used outside of this library code.
* used outside of this library code.
*/
*/
#ifndef
__
XLAT_TABLES_V2_HELPERS_H
__
#ifndef XLAT_TABLES_V2_HELPERS_H
#define
__
XLAT_TABLES_V2_HELPERS_H
__
#define XLAT_TABLES_V2_HELPERS_H
#ifndef
__
XLAT_TABLES_V2_H
__
#ifndef XLAT_TABLES_V2_H
#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
#endif
#endif
...
@@ -32,7 +32,7 @@ struct mmap_region;
...
@@ -32,7 +32,7 @@ struct mmap_region;
* the fields of the structure but its parameter list is not guaranteed to
* the fields of the structure but its parameter list is not guaranteed to
* remain stable as we add members to mmap_region_t.
* remain stable as we add members to mmap_region_t.
*/
*/
#define
_
MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr) \
#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
\
{ \
{ \
.base_pa = (_pa), \
.base_pa = (_pa), \
.base_va = (_va), \
.base_va = (_va), \
...
@@ -58,7 +58,7 @@ struct xlat_ctx {
...
@@ -58,7 +58,7 @@ struct xlat_ctx {
* null entry.
* null entry.
*/
*/
struct
mmap_region
*
mmap
;
struct
mmap_region
*
mmap
;
unsigned
int
mmap_num
;
int
mmap_num
;
/*
/*
* Array of finer-grain translation tables.
* Array of finer-grain translation tables.
...
@@ -66,7 +66,7 @@ struct xlat_ctx {
...
@@ -66,7 +66,7 @@ struct xlat_ctx {
* contain both level-2 and level-3 entries.
* contain both level-2 and level-3 entries.
*/
*/
uint64_t
(
*
tables
)[
XLAT_TABLE_ENTRIES
];
uint64_t
(
*
tables
)[
XLAT_TABLE_ENTRIES
];
unsigned
int
tables_num
;
int
tables_num
;
/*
/*
* Keep track of how many regions are mapped in each table. The base
* Keep track of how many regions are mapped in each table. The base
* table can't be unmapped so it isn't needed to keep track of it.
* table can't be unmapped so it isn't needed to keep track of it.
...
@@ -75,7 +75,7 @@ struct xlat_ctx {
...
@@ -75,7 +75,7 @@ struct xlat_ctx {
int
*
tables_mapped_regions
;
int
*
tables_mapped_regions
;
#endif
/* PLAT_XLAT_TABLES_DYNAMIC */
#endif
/* PLAT_XLAT_TABLES_DYNAMIC */
unsigned
int
next_table
;
int
next_table
;
/*
/*
* Base translation table. It doesn't need to have the same amount of
* Base translation table. It doesn't need to have the same amount of
...
@@ -96,7 +96,7 @@ struct xlat_ctx {
...
@@ -96,7 +96,7 @@ struct xlat_ctx {
unsigned
int
base_level
;
unsigned
int
base_level
;
/* Set to 1 when the translation tables are initialized. */
/* Set to 1 when the translation tables are initialized. */
unsigned
int
initialized
;
int
initialized
;
/*
/*
* Translation regime managed by this xlat_ctx_t. It should be one of
* Translation regime managed by this xlat_ctx_t. It should be one of
...
@@ -106,60 +106,60 @@ struct xlat_ctx {
...
@@ -106,60 +106,60 @@ struct xlat_ctx {
};
};
#if PLAT_XLAT_TABLES_DYNAMIC
#if PLAT_XLAT_TABLES_DYNAMIC
#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
#define
XLAT
_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
static int _ctx_name##_mapped_regions[_xlat_tables_count];
static int _ctx_name##_mapped_regions[_xlat_tables_count];
#define _REGISTER_DYNMAP_STRUCT(_ctx_name) \
#define
XLAT
_REGISTER_DYNMAP_STRUCT(_ctx_name) \
.tables_mapped_regions = _ctx_name##_mapped_regions,
.tables_mapped_regions = _ctx_name##_mapped_regions,
#else
#else
#define _ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
#define
XLAT
_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
/* do nothing */
/* do nothing */
#define _REGISTER_DYNMAP_STRUCT(_ctx_name) \
#define
XLAT
_REGISTER_DYNMAP_STRUCT(_ctx_name) \
/* do nothing */
/* do nothing */
#endif
/* PLAT_XLAT_TABLES_DYNAMIC */
#endif
/* PLAT_XLAT_TABLES_DYNAMIC */
#define
_
REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,
_xlat_tables_count,
\
#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count,
\
_
virt_addr_space_size, _phy
_addr_space_size,
\
_
xlat_tables_count, _virt
_addr_space_size, \
_xlat_regime, _section_name)
\
_phy_addr_space_size,
_xlat_regime, _section_name)\
CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size),
\
CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size), \
assert_invalid_virtual_addr_space_size_for_##_ctx_name);
\
assert_invalid_virtual_addr_space_size_for_##_ctx_name);\
\
\
CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size),
\
CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size), \
assert_invalid_physical_addr_space_sizefor_##_ctx_name);
\
assert_invalid_physical_addr_space_sizefor_##_ctx_name);\
\
\
static mmap_region_t _ctx_name##_mmap[_mmap_count + 1];
\
static mmap_region_t _ctx_name##_mmap[_mmap_count + 1]; \
\
\
static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count]
\
static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count] \
[XLAT_TABLE_ENTRIES]
\
[XLAT_TABLE_ENTRIES] \
__aligned(XLAT_TABLE_SIZE) __section(_section_name);
\
__aligned(XLAT_TABLE_SIZE) __section(_section_name); \
\
\
static uint64_t _ctx_name##_base_xlat_table
\
static uint64_t _ctx_name##_base_xlat_table \
[GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)]
\
[GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)] \
__aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)
\
__aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
* sizeof(uint64_t));
\
* sizeof(uint64_t)); \
\
\
_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count)
\
XLAT
_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
\
\
static xlat_ctx_t _ctx_name##_xlat_ctx = {
\
static xlat_ctx_t _ctx_name##_xlat_ctx = { \
.va_max_address = (_virt_addr_space_size) - 1
,
\
.va_max_address = (_virt_addr_space_size) - 1
UL,
\
.pa_max_address = (_phy_addr_space_size) - 1
,
\
.pa_max_address = (_phy_addr_space_size) - 1
ULL,
\
.mmap = _ctx_name##_mmap,
\
.mmap = _ctx_name##_mmap, \
.mmap_num = (_mmap_count),
\
.mmap_num = (_mmap_count), \
.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),
\
.base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
.base_table = _ctx_name##_base_xlat_table,
\
.base_table = _ctx_name##_base_xlat_table, \
.base_table_entries =
\
.base_table_entries = \
GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),
\
GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size),\
.tables = _ctx_name##_xlat_tables,
\
.tables = _ctx_name##_xlat_tables, \
.tables_num = _xlat_tables_count,
\
.tables_num = _xlat_tables_count, \
_REGISTER_DYNMAP_STRUCT(_ctx_name)
\
XLAT
_REGISTER_DYNMAP_STRUCT(_ctx_name) \
.xlat_regime = (_xlat_regime),
\
.xlat_regime = (_xlat_regime), \
.max_pa = 0,
\
.max_pa = 0
U
, \
.max_va = 0,
\
.max_va = 0
U
, \
.next_table = 0,
\
.next_table = 0, \
.initialized = 0,
\
.initialized = 0, \
}
}
#endif
/*__ASSEMBLY__*/
#endif
/*__ASSEMBLY__*/
#endif
/*
__
XLAT_TABLES_V2_HELPERS_H
__
*/
#endif
/* XLAT_TABLES_V2_HELPERS_H */
lib/xlat_tables/aarch32/xlat_tables.c
View file @
2ee596c4
...
@@ -13,7 +13,7 @@
...
@@ -13,7 +13,7 @@
#include <xlat_tables.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
#include "../xlat_tables_private.h"
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#if
(
ARM_ARCH_MAJOR == 7
)
&& !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#error ARMv7 target does not support LPAE MMU descriptors
#endif
#endif
...
@@ -34,16 +34,16 @@ static unsigned long long get_max_supported_pa(void)
...
@@ -34,16 +34,16 @@ static unsigned long long get_max_supported_pa(void)
}
}
#endif
/* ENABLE_ASSERTIONS */
#endif
/* ENABLE_ASSERTIONS */
int
xlat_arch_current_el
(
void
)
unsigned
int
xlat_arch_current_el
(
void
)
{
{
/*
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
* SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
* SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
*/
*/
return
3
;
return
3
U
;
}
}
uint64_t
xlat_arch_get_xn_desc
(
int
el
__unused
)
uint64_t
xlat_arch_get_xn_desc
(
unsigned
int
el
__unused
)
{
{
return
UPPER_ATTRS
(
XN
);
return
UPPER_ATTRS
(
XN
);
}
}
...
@@ -53,12 +53,12 @@ void init_xlat_tables(void)
...
@@ -53,12 +53,12 @@ void init_xlat_tables(void)
unsigned
long
long
max_pa
;
unsigned
long
long
max_pa
;
uintptr_t
max_va
;
uintptr_t
max_va
;
print_mmap
();
print_mmap
();
init_xlation_table
(
0
,
base_xlation_table
,
XLAT_TABLE_LEVEL_BASE
,
init_xlation_table
(
0
U
,
base_xlation_table
,
XLAT_TABLE_LEVEL_BASE
,
&
max_va
,
&
max_pa
);
&
max_va
,
&
max_pa
);
assert
(
max_va
<=
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
);
assert
(
max_va
<=
(
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
U
)
);
assert
(
max_pa
<=
PLAT_PHY_ADDR_SPACE_SIZE
-
1
);
assert
(
max_pa
<=
(
PLAT_PHY_ADDR_SPACE_SIZE
-
1
U
)
);
assert
((
PLAT_PHY_ADDR_SPACE_SIZE
-
1
)
<=
get_max_supported_pa
());
assert
((
PLAT_PHY_ADDR_SPACE_SIZE
-
1
U
)
<=
get_max_supported_pa
());
}
}
/*******************************************************************************
/*******************************************************************************
...
@@ -71,7 +71,7 @@ void enable_mmu_secure(unsigned int flags)
...
@@ -71,7 +71,7 @@ void enable_mmu_secure(unsigned int flags)
uint64_t
ttbr0
;
uint64_t
ttbr0
;
assert
(
IS_IN_SECURE
());
assert
(
IS_IN_SECURE
());
assert
((
read_sctlr
()
&
SCTLR_M_BIT
)
==
0
);
assert
((
read_sctlr
()
&
SCTLR_M_BIT
)
==
0
U
);
/* Set attributes in the right indices of the MAIR */
/* Set attributes in the right indices of the MAIR */
mair0
=
MAIR0_ATTR_SET
(
ATTR_DEVICE
,
ATTR_DEVICE_INDEX
);
mair0
=
MAIR0_ATTR_SET
(
ATTR_DEVICE
,
ATTR_DEVICE_INDEX
);
...
@@ -87,18 +87,18 @@ void enable_mmu_secure(unsigned int flags)
...
@@ -87,18 +87,18 @@ void enable_mmu_secure(unsigned int flags)
/*
/*
* Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
* Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
*/
*/
if
(
flags
&
XLAT_TABLE_NC
)
{
int
t0sz
=
32
-
__builtin_ctzll
(
PLAT_VIRT_ADDR_SPACE_SIZE
);
if
((
flags
&
XLAT_TABLE_NC
)
!=
0U
)
{
/* Inner & outer non-cacheable non-shareable. */
/* Inner & outer non-cacheable non-shareable. */
ttbcr
=
TTBCR_EAE_BIT
|
ttbcr
=
TTBCR_EAE_BIT
|
TTBCR_SH0_NON_SHAREABLE
|
TTBCR_RGN0_OUTER_NC
|
TTBCR_SH0_NON_SHAREABLE
|
TTBCR_RGN0_OUTER_NC
|
TTBCR_RGN0_INNER_NC
|
TTBCR_RGN0_INNER_NC
|
(
uint32_t
)
t0sz
;
(
32
-
__builtin_ctzll
(
PLAT_VIRT_ADDR_SPACE_SIZE
));
}
else
{
}
else
{
/* Inner & outer WBWA & shareable. */
/* Inner & outer WBWA & shareable. */
ttbcr
=
TTBCR_EAE_BIT
|
ttbcr
=
TTBCR_EAE_BIT
|
TTBCR_SH0_INNER_SHAREABLE
|
TTBCR_RGN0_OUTER_WBA
|
TTBCR_SH0_INNER_SHAREABLE
|
TTBCR_RGN0_OUTER_WBA
|
TTBCR_RGN0_INNER_WBA
|
TTBCR_RGN0_INNER_WBA
|
(
uint32_t
)
t0sz
;
(
32
-
__builtin_ctzll
(
PLAT_VIRT_ADDR_SPACE_SIZE
));
}
}
ttbcr
|=
TTBCR_EPD1_BIT
;
ttbcr
|=
TTBCR_EPD1_BIT
;
write_ttbcr
(
ttbcr
);
write_ttbcr
(
ttbcr
);
...
@@ -106,7 +106,7 @@ void enable_mmu_secure(unsigned int flags)
...
@@ -106,7 +106,7 @@ void enable_mmu_secure(unsigned int flags)
/* Set TTBR0 bits as well */
/* Set TTBR0 bits as well */
ttbr0
=
(
uintptr_t
)
base_xlation_table
;
ttbr0
=
(
uintptr_t
)
base_xlation_table
;
write64_ttbr0
(
ttbr0
);
write64_ttbr0
(
ttbr0
);
write64_ttbr1
(
0
);
write64_ttbr1
(
0
U
);
/*
/*
* Ensure all translation table writes have drained
* Ensure all translation table writes have drained
...
@@ -120,7 +120,7 @@ void enable_mmu_secure(unsigned int flags)
...
@@ -120,7 +120,7 @@ void enable_mmu_secure(unsigned int flags)
sctlr
=
read_sctlr
();
sctlr
=
read_sctlr
();
sctlr
|=
SCTLR_WXN_BIT
|
SCTLR_M_BIT
;
sctlr
|=
SCTLR_WXN_BIT
|
SCTLR_M_BIT
;
if
(
flags
&
DISABLE_DCACHE
)
if
(
(
flags
&
DISABLE_DCACHE
)
!=
0U
)
sctlr
&=
~
SCTLR_C_BIT
;
sctlr
&=
~
SCTLR_C_BIT
;
else
else
sctlr
|=
SCTLR_C_BIT
;
sctlr
|=
SCTLR_C_BIT
;
...
...
lib/xlat_tables/aarch64/xlat_tables.c
View file @
2ee596c4
...
@@ -31,26 +31,26 @@ static unsigned long long calc_physical_addr_size_bits(
...
@@ -31,26 +31,26 @@ static unsigned long long calc_physical_addr_size_bits(
unsigned
long
long
max_addr
)
unsigned
long
long
max_addr
)
{
{
/* Physical address can't exceed 48 bits */
/* Physical address can't exceed 48 bits */
assert
((
max_addr
&
ADDR_MASK_48_TO_63
)
==
0
);
assert
((
max_addr
&
ADDR_MASK_48_TO_63
)
==
0
U
);
/* 48 bits address */
/* 48 bits address */
if
(
max_addr
&
ADDR_MASK_44_TO_47
)
if
(
(
max_addr
&
ADDR_MASK_44_TO_47
)
!=
0U
)
return
TCR_PS_BITS_256TB
;
return
TCR_PS_BITS_256TB
;
/* 44 bits address */
/* 44 bits address */
if
(
max_addr
&
ADDR_MASK_42_TO_43
)
if
(
(
max_addr
&
ADDR_MASK_42_TO_43
)
!=
0U
)
return
TCR_PS_BITS_16TB
;
return
TCR_PS_BITS_16TB
;
/* 42 bits address */
/* 42 bits address */
if
(
max_addr
&
ADDR_MASK_40_TO_41
)
if
(
(
max_addr
&
ADDR_MASK_40_TO_41
)
!=
0U
)
return
TCR_PS_BITS_4TB
;
return
TCR_PS_BITS_4TB
;
/* 40 bits address */
/* 40 bits address */
if
(
max_addr
&
ADDR_MASK_36_TO_39
)
if
(
(
max_addr
&
ADDR_MASK_36_TO_39
)
!=
0U
)
return
TCR_PS_BITS_1TB
;
return
TCR_PS_BITS_1TB
;
/* 36 bits address */
/* 36 bits address */
if
(
max_addr
&
ADDR_MASK_32_TO_35
)
if
(
(
max_addr
&
ADDR_MASK_32_TO_35
)
!=
0U
)
return
TCR_PS_BITS_64GB
;
return
TCR_PS_BITS_64GB
;
return
TCR_PS_BITS_4GB
;
return
TCR_PS_BITS_4GB
;
...
@@ -78,21 +78,21 @@ static unsigned long long get_max_supported_pa(void)
...
@@ -78,21 +78,21 @@ static unsigned long long get_max_supported_pa(void)
}
}
#endif
/* ENABLE_ASSERTIONS */
#endif
/* ENABLE_ASSERTIONS */
int
xlat_arch_current_el
(
void
)
unsigned
int
xlat_arch_current_el
(
void
)
{
{
int
el
=
GET_EL
(
read_CurrentEl
());
unsigned
int
el
=
(
unsigned
int
)
GET_EL
(
read_CurrentEl
());
assert
(
el
>
0
);
assert
(
el
>
0
U
);
return
el
;
return
el
;
}
}
uint64_t
xlat_arch_get_xn_desc
(
int
el
)
uint64_t
xlat_arch_get_xn_desc
(
unsigned
int
el
)
{
{
if
(
el
==
3
)
{
if
(
el
==
3
U
)
{
return
UPPER_ATTRS
(
XN
);
return
UPPER_ATTRS
(
XN
);
}
else
{
}
else
{
assert
(
el
==
1
);
assert
(
el
==
1
U
);
return
UPPER_ATTRS
(
PXN
);
return
UPPER_ATTRS
(
PXN
);
}
}
}
}
...
@@ -102,12 +102,12 @@ void init_xlat_tables(void)
...
@@ -102,12 +102,12 @@ void init_xlat_tables(void)
unsigned
long
long
max_pa
;
unsigned
long
long
max_pa
;
uintptr_t
max_va
;
uintptr_t
max_va
;
print_mmap
();
print_mmap
();
init_xlation_table
(
0
,
base_xlation_table
,
XLAT_TABLE_LEVEL_BASE
,
init_xlation_table
(
0
U
,
base_xlation_table
,
XLAT_TABLE_LEVEL_BASE
,
&
max_va
,
&
max_pa
);
&
max_va
,
&
max_pa
);
assert
(
max_va
<=
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
);
assert
(
max_va
<=
(
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
U
)
);
assert
(
max_pa
<=
PLAT_PHY_ADDR_SPACE_SIZE
-
1
);
assert
(
max_pa
<=
(
PLAT_PHY_ADDR_SPACE_SIZE
-
1
U
)
);
assert
((
PLAT_PHY_ADDR_SPACE_SIZE
-
1
)
<=
get_max_supported_pa
());
assert
((
PLAT_PHY_ADDR_SPACE_SIZE
-
1
U
)
<=
get_max_supported_pa
());
tcr_ps_bits
=
calc_physical_addr_size_bits
(
max_pa
);
tcr_ps_bits
=
calc_physical_addr_size_bits
(
max_pa
);
}
}
...
@@ -129,7 +129,7 @@ void init_xlat_tables(void)
...
@@ -129,7 +129,7 @@ void init_xlat_tables(void)
uint32_t sctlr; \
uint32_t sctlr; \
\
\
assert(IS_IN_EL(_el)); \
assert(IS_IN_EL(_el)); \
assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0
U
); \
\
\
/* Set attributes in the right indices of the MAIR */
\
/* Set attributes in the right indices of the MAIR */
\
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
...
@@ -144,16 +144,18 @@ void init_xlat_tables(void)
...
@@ -144,16 +144,18 @@ void init_xlat_tables(void)
\
\
/* Set TCR bits as well. */
\
/* Set TCR bits as well. */
\
/* Set T0SZ to (64 - width of virtual address space) */
\
/* Set T0SZ to (64 - width of virtual address space) */
\
if (flags & XLAT_TABLE_NC) { \
int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
\
if ((flags & XLAT_TABLE_NC) != 0U) { \
/* Inner & outer non-cacheable non-shareable. */
\
/* Inner & outer non-cacheable non-shareable. */
\
tcr = TCR_SH_NON_SHAREABLE | \
tcr = TCR_SH_NON_SHAREABLE | \
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
(
64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
\
(
uint64_t) t0sz;
\
} else { \
} else { \
/* Inner & outer WBWA & shareable. */
\
/* Inner & outer WBWA & shareable. */
\
tcr = TCR_SH_INNER_SHAREABLE | \
tcr = TCR_SH_INNER_SHAREABLE | \
TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
(
64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
\
(
uint64_t) t0sz;
\
} \
} \
tcr |= _tcr_extra; \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
write_tcr_el##_el(tcr); \
...
@@ -172,7 +174,7 @@ void init_xlat_tables(void)
...
@@ -172,7 +174,7 @@ void init_xlat_tables(void)
sctlr = read_sctlr_el##_el(); \
sctlr = read_sctlr_el##_el(); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
\
\
if (flags & DISABLE_DCACHE)
\
if
(
(flags & DISABLE_DCACHE)
!= 0U)
\
sctlr &= ~SCTLR_C_BIT; \
sctlr &= ~SCTLR_C_BIT; \
else \
else \
sctlr |= SCTLR_C_BIT; \
sctlr |= SCTLR_C_BIT; \
...
...
lib/xlat_tables/xlat_tables_common.c
View file @
2ee596c4
...
@@ -32,6 +32,7 @@
...
@@ -32,6 +32,7 @@
#endif
#endif
#define UNSET_DESC ~0ULL
#define UNSET_DESC ~0ULL
#define MT_UNKNOWN ~0U
static
uint64_t
xlat_tables
[
MAX_XLAT_TABLES
][
XLAT_TABLE_ENTRIES
]
static
uint64_t
xlat_tables
[
MAX_XLAT_TABLES
][
XLAT_TABLE_ENTRIES
]
__aligned
(
XLAT_TABLE_SIZE
)
__section
(
"xlat_table"
);
__aligned
(
XLAT_TABLE_SIZE
)
__section
(
"xlat_table"
);
...
@@ -55,7 +56,7 @@ void print_mmap(void)
...
@@ -55,7 +56,7 @@ void print_mmap(void)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
debug_print
(
"mmap:
\n
"
);
debug_print
(
"mmap:
\n
"
);
mmap_region_t
*
mm
=
mmap
;
mmap_region_t
*
mm
=
mmap
;
while
(
mm
->
size
)
{
while
(
mm
->
size
!=
0U
)
{
debug_print
(
" VA:%p PA:0x%llx size:0x%zx attr:0x%x
\n
"
,
debug_print
(
" VA:%p PA:0x%llx size:0x%zx attr:0x%x
\n
"
,
(
void
*
)
mm
->
base_va
,
mm
->
base_pa
,
(
void
*
)
mm
->
base_va
,
mm
->
base_pa
,
mm
->
size
,
mm
->
attr
);
mm
->
size
,
mm
->
attr
);
...
@@ -69,46 +70,47 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
...
@@ -69,46 +70,47 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t
size
,
unsigned
int
attr
)
size_t
size
,
unsigned
int
attr
)
{
{
mmap_region_t
*
mm
=
mmap
;
mmap_region_t
*
mm
=
mmap
;
mmap_region_t
*
mm_last
=
mm
+
ARRAY_SIZE
(
mmap
)
-
1
;
const
mmap_region_t
*
mm_last
=
mm
+
ARRAY_SIZE
(
mmap
)
-
1
U
;
unsigned
long
long
end_pa
=
base_pa
+
size
-
1
;
unsigned
long
long
end_pa
=
base_pa
+
size
-
1
U
;
uintptr_t
end_va
=
base_va
+
size
-
1
;
uintptr_t
end_va
=
base_va
+
size
-
1
U
;
assert
(
IS_PAGE_ALIGNED
(
base_pa
));
assert
(
IS_PAGE_ALIGNED
(
base_pa
));
assert
(
IS_PAGE_ALIGNED
(
base_va
));
assert
(
IS_PAGE_ALIGNED
(
base_va
));
assert
(
IS_PAGE_ALIGNED
(
size
));
assert
(
IS_PAGE_ALIGNED
(
size
));
if
(
!
size
)
if
(
size
==
0U
)
return
;
return
;
assert
(
base_pa
<
end_pa
);
/* Check for overflows */
assert
(
base_pa
<
end_pa
);
/* Check for overflows */
assert
(
base_va
<
end_va
);
assert
(
base_va
<
end_va
);
assert
((
base_va
+
(
uintptr_t
)
size
-
(
uintptr_t
)
1
)
<=
assert
((
base_va
+
(
uintptr_t
)
size
-
(
uintptr_t
)
1
)
<=
(
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
));
(
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
U
));
assert
((
base_pa
+
(
unsigned
long
long
)
size
-
1ULL
)
<=
assert
((
base_pa
+
(
unsigned
long
long
)
size
-
1ULL
)
<=
(
PLAT_PHY_ADDR_SPACE_SIZE
-
1
));
(
PLAT_PHY_ADDR_SPACE_SIZE
-
1
U
));
#if ENABLE_ASSERTIONS
#if ENABLE_ASSERTIONS
/* Check for PAs and VAs overlaps with all other regions */
/* Check for PAs and VAs overlaps with all other regions */
for
(
mm
=
mmap
;
mm
->
size
;
++
mm
)
{
for
(
mm
=
mmap
;
mm
->
size
;
++
mm
)
{
uintptr_t
mm_end_va
=
mm
->
base_va
+
mm
->
size
-
1
;
uintptr_t
mm_end_va
=
mm
->
base_va
+
mm
->
size
-
1
U
;
/*
/*
* Check if one of the regions is completely inside the other
* Check if one of the regions is completely inside the other
* one.
* one.
*/
*/
int
fully_overlapped_va
=
int
fully_overlapped_va
=
((
base_va
>=
mm
->
base_va
)
&&
(
end_va
<=
mm_end_va
))
||
(((
base_va
>=
mm
->
base_va
)
&&
(
end_va
<=
mm_end_va
))
||
((
mm
->
base_va
>=
base_va
)
&&
(
mm_end_va
<=
end_va
));
((
mm
->
base_va
>=
base_va
)
&&
(
mm_end_va
<=
end_va
)))
?
1
:
0
;
/*
/*
* Full VA overlaps are only allowed if both regions are
* Full VA overlaps are only allowed if both regions are
* identity mapped (zero offset) or have the same VA to PA
* identity mapped (zero offset) or have the same VA to PA
* offset. Also, make sure that it's not the exact same area.
* offset. Also, make sure that it's not the exact same area.
*/
*/
if
(
fully_overlapped_va
)
{
if
(
fully_overlapped_va
==
1
)
{
assert
((
mm
->
base_va
-
mm
->
base_pa
)
==
assert
((
mm
->
base_va
-
mm
->
base_pa
)
==
(
base_va
-
base_pa
));
(
base_va
-
base_pa
));
assert
((
base_va
!=
mm
->
base_va
)
||
(
size
!=
mm
->
size
));
assert
((
base_va
!=
mm
->
base_va
)
||
(
size
!=
mm
->
size
));
...
@@ -122,12 +124,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
...
@@ -122,12 +124,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
unsigned
long
long
mm_end_pa
=
unsigned
long
long
mm_end_pa
=
mm
->
base_pa
+
mm
->
size
-
1
;
mm
->
base_pa
+
mm
->
size
-
1
;
int
separated_pa
=
int
separated_pa
=
((
end_pa
<
mm
->
base_pa
)
||
(
end_pa
<
mm
->
base_pa
)
||
(
base_pa
>
mm_end_pa
);
(
base_pa
>
mm_end_pa
)
)
?
1
:
0
;
int
separated_va
=
int
separated_va
=
((
end_va
<
mm
->
base_va
)
||
(
end_va
<
mm
->
base_va
)
||
(
base_va
>
mm_end_va
);
(
base_va
>
mm_end_va
)
)
?
1
:
0
;
assert
(
separated_va
&&
separated_pa
);
assert
(
(
separated_va
==
1
)
&&
(
separated_pa
==
1
)
);
}
}
}
}
...
@@ -136,7 +138,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
...
@@ -136,7 +138,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
#endif
/* ENABLE_ASSERTIONS */
#endif
/* ENABLE_ASSERTIONS */
/* Find correct place in mmap to insert new region */
/* Find correct place in mmap to insert new region */
while
(
mm
->
base_va
<
base_va
&&
mm
->
size
)
while
(
(
mm
->
base_va
<
base_va
)
&&
(
mm
->
size
!=
0U
)
)
++
mm
;
++
mm
;
/*
/*
...
@@ -154,10 +156,10 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
...
@@ -154,10 +156,10 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
++
mm
;
++
mm
;
/* Make room for new region by moving other regions up by one place */
/* Make room for new region by moving other regions up by one place */
memmove
(
mm
+
1
,
mm
,
(
uintptr_t
)
mm_last
-
(
uintptr_t
)
mm
);
(
void
)
memmove
(
mm
+
1
,
mm
,
(
uintptr_t
)
mm_last
-
(
uintptr_t
)
mm
);
/* Check we haven't lost the empty sentinal from the end of the array */
/* Check we haven't lost the empty sentinal from the end of the array */
assert
(
mm_last
->
size
==
0
);
assert
(
mm_last
->
size
==
0
U
);
mm
->
base_pa
=
base_pa
;
mm
->
base_pa
=
base_pa
;
mm
->
base_va
=
base_va
;
mm
->
base_va
=
base_va
;
...
@@ -172,9 +174,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
...
@@ -172,9 +174,12 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
void
mmap_add
(
const
mmap_region_t
*
mm
)
void
mmap_add
(
const
mmap_region_t
*
mm
)
{
{
while
(
mm
->
size
)
{
const
mmap_region_t
*
mm_cursor
=
mm
;
mmap_add_region
(
mm
->
base_pa
,
mm
->
base_va
,
mm
->
size
,
mm
->
attr
);
++
mm
;
while
(
mm_cursor
->
size
!=
0U
)
{
mmap_add_region
(
mm_cursor
->
base_pa
,
mm_cursor
->
base_va
,
mm_cursor
->
size
,
mm_cursor
->
attr
);
mm_cursor
++
;
}
}
}
}
...
@@ -185,7 +190,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
...
@@ -185,7 +190,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
int
mem_type
;
int
mem_type
;
/* Make sure that the granularity is fine enough to map this address. */
/* Make sure that the granularity is fine enough to map this address. */
assert
((
addr_pa
&
XLAT_BLOCK_MASK
(
level
))
==
0
);
assert
((
addr_pa
&
XLAT_BLOCK_MASK
(
level
))
==
0
U
);
desc
=
addr_pa
;
desc
=
addr_pa
;
/*
/*
...
@@ -193,8 +198,8 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
...
@@ -193,8 +198,8 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
* rest.
* rest.
*/
*/
desc
|=
(
level
==
XLAT_TABLE_LEVEL_MAX
)
?
PAGE_DESC
:
BLOCK_DESC
;
desc
|=
(
level
==
XLAT_TABLE_LEVEL_MAX
)
?
PAGE_DESC
:
BLOCK_DESC
;
desc
|=
(
attr
&
MT_NS
)
?
LOWER_ATTRS
(
NS
)
:
0
;
desc
|=
(
(
attr
&
MT_NS
)
!=
0U
)
?
LOWER_ATTRS
(
NS
)
:
0
U
;
desc
|=
(
attr
&
MT_RW
)
?
LOWER_ATTRS
(
AP_RW
)
:
LOWER_ATTRS
(
AP_RO
);
desc
|=
(
(
attr
&
MT_RW
)
!=
0U
)
?
LOWER_ATTRS
(
AP_RW
)
:
LOWER_ATTRS
(
AP_RO
);
/*
/*
* Always set the access flag, as this library assumes access flag
* Always set the access flag, as this library assumes access flag
* faults aren't managed.
* faults aren't managed.
...
@@ -239,7 +244,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
...
@@ -239,7 +244,7 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
* attribute to figure out the value of the XN bit.
* attribute to figure out the value of the XN bit.
*/
*/
if
((
attr
&
MT_RW
)
||
(
attr
&
MT_EXECUTE_NEVER
))
{
if
((
(
attr
&
MT_RW
)
!=
0U
)
||
(
(
attr
&
MT_EXECUTE_NEVER
)
!=
0U
)
)
{
desc
|=
execute_never_mask
;
desc
|=
execute_never_mask
;
}
}
...
@@ -253,9 +258,9 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
...
@@ -253,9 +258,9 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
debug_print
((
mem_type
==
MT_MEMORY
)
?
"MEM"
:
debug_print
((
mem_type
==
MT_MEMORY
)
?
"MEM"
:
((
mem_type
==
MT_NON_CACHEABLE
)
?
"NC"
:
"DEV"
));
((
mem_type
==
MT_NON_CACHEABLE
)
?
"NC"
:
"DEV"
));
debug_print
(
attr
&
MT_RW
?
"-RW"
:
"-RO"
);
debug_print
(
((
attr
&
MT_RW
)
!=
0U
)
?
"-RW"
:
"-RO"
);
debug_print
(
attr
&
MT_NS
?
"-NS"
:
"-S"
);
debug_print
(
((
attr
&
MT_NS
)
!=
0U
)
?
"-NS"
:
"-S"
);
debug_print
(
attr
&
MT_EXECUTE_NEVER
?
"-XN"
:
"-EXEC"
);
debug_print
(
((
attr
&
MT_EXECUTE_NEVER
)
!=
0U
)
?
"-XN"
:
"-EXEC"
);
return
desc
;
return
desc
;
}
}
...
@@ -265,14 +270,14 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
...
@@ -265,14 +270,14 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
*
*
* On success, this function returns 0.
* On success, this function returns 0.
* If there are partial overlaps (meaning that a smaller size is needed) or if
* If there are partial overlaps (meaning that a smaller size is needed) or if
* the region can't be found in the given area, it returns
-1. In this case the
* the region can't be found in the given area, it returns
MT_UNKNOWN. In this
* value pointed by attr should be ignored by the caller.
*
case the
value pointed by attr should be ignored by the caller.
*/
*/
static
int
mmap_region_attr
(
mmap_region_t
*
mm
,
uintptr_t
base_va
,
static
unsigned
int
mmap_region_attr
(
const
mmap_region_t
*
mm
,
uintptr_t
base_va
,
size_t
size
,
unsigned
int
*
attr
)
size_t
size
,
unsigned
int
*
attr
)
{
{
/* Don't assume that the area is contained in the first region */
/* Don't assume that the area is contained in the first region */
int
ret
=
-
1
;
unsigned
int
ret
=
MT_UNKNOWN
;
/*
/*
* Get attributes from last (innermost) region that contains the
* Get attributes from last (innermost) region that contains the
...
@@ -289,26 +294,26 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
...
@@ -289,26 +294,26 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
* in region 2. The loop shouldn't stop at region 2 as inner regions
* in region 2. The loop shouldn't stop at region 2 as inner regions
* have priority over outer regions, it should stop at region 5.
* have priority over outer regions, it should stop at region 5.
*/
*/
for
(
;
;
++
mm
)
{
for
(
;
;
++
mm
)
{
if
(
!
mm
->
size
)
if
(
mm
->
size
==
0U
)
return
ret
;
/* Reached end of list */
return
ret
;
/* Reached end of list */
if
(
mm
->
base_va
>
base_va
+
size
-
1
)
if
(
mm
->
base_va
>
(
base_va
+
size
-
1
U
)
)
return
ret
;
/* Next region is after area so end */
return
ret
;
/* Next region is after area so end */
if
(
mm
->
base_va
+
mm
->
size
-
1
<
base_va
)
if
(
(
mm
->
base_va
+
mm
->
size
-
1
U
)
<
base_va
)
continue
;
/* Next region has already been overtaken */
continue
;
/* Next region has already been overtaken */
if
(
!
ret
&&
mm
->
attr
==
*
attr
)
if
(
(
ret
==
0U
)
&&
(
mm
->
attr
==
*
attr
)
)
continue
;
/* Region doesn't override attribs so skip */
continue
;
/* Region doesn't override attribs so skip */
if
(
mm
->
base_va
>
base_va
||
if
(
(
mm
->
base_va
>
base_va
)
||
mm
->
base_va
+
mm
->
size
-
1
<
base_va
+
size
-
1
)
((
mm
->
base_va
+
mm
->
size
-
1
U
)
<
(
base_va
+
size
-
1
U
))
)
return
-
1
;
/* Region doesn't fully cover
our
area */
return
MT_UNKNOWN
;
/* Region doesn't fully cover area */
*
attr
=
mm
->
attr
;
*
attr
=
mm
->
attr
;
ret
=
0
;
ret
=
0
U
;
}
}
return
ret
;
return
ret
;
}
}
...
@@ -318,7 +323,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
...
@@ -318,7 +323,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
uint64_t
*
table
,
uint64_t
*
table
,
unsigned
int
level
)
unsigned
int
level
)
{
{
assert
(
level
>=
XLAT_TABLE_LEVEL_MIN
&&
level
<=
XLAT_TABLE_LEVEL_MAX
);
assert
((
level
>=
XLAT_TABLE_LEVEL_MIN
)
&&
(
level
<=
XLAT_TABLE_LEVEL_MAX
));
unsigned
int
level_size_shift
=
unsigned
int
level_size_shift
=
L0_XLAT_ADDRESS_SHIFT
-
level
*
XLAT_TABLE_ENTRIES_SHIFT
;
L0_XLAT_ADDRESS_SHIFT
-
level
*
XLAT_TABLE_ENTRIES_SHIFT
;
...
@@ -331,10 +337,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
...
@@ -331,10 +337,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
do
{
do
{
uint64_t
desc
=
UNSET_DESC
;
uint64_t
desc
=
UNSET_DESC
;
if
(
!
mm
->
size
)
{
if
(
mm
->
size
==
0U
)
{
/* Done mapping regions; finish zeroing the table */
/* Done mapping regions; finish zeroing the table */
desc
=
INVALID_DESC
;
desc
=
INVALID_DESC
;
}
else
if
(
mm
->
base_va
+
mm
->
size
-
1
<
base_va
)
{
}
else
if
(
(
mm
->
base_va
+
mm
->
size
-
1
U
)
<
base_va
)
{
/* This area is after the region so get next region */
/* This area is after the region so get next region */
++
mm
;
++
mm
;
continue
;
continue
;
...
@@ -343,7 +349,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
...
@@ -343,7 +349,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
debug_print
(
"%s VA:%p size:0x%llx "
,
get_level_spacer
(
level
),
debug_print
(
"%s VA:%p size:0x%llx "
,
get_level_spacer
(
level
),
(
void
*
)
base_va
,
(
unsigned
long
long
)
level_size
);
(
void
*
)
base_va
,
(
unsigned
long
long
)
level_size
);
if
(
mm
->
base_va
>
base_va
+
level_size
-
1
)
{
if
(
mm
->
base_va
>
(
base_va
+
level_size
-
1
U
)
)
{
/* Next region is after this area. Nothing to map yet */
/* Next region is after this area. Nothing to map yet */
desc
=
INVALID_DESC
;
desc
=
INVALID_DESC
;
/* Make sure that the current level allows block descriptors */
/* Make sure that the current level allows block descriptors */
...
@@ -354,9 +360,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
...
@@ -354,9 +360,10 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
* it will return the innermost region's attributes.
* it will return the innermost region's attributes.
*/
*/
unsigned
int
attr
;
unsigned
int
attr
;
int
r
=
mmap_region_attr
(
mm
,
base_va
,
level_size
,
&
attr
);
unsigned
int
r
=
mmap_region_attr
(
mm
,
base_va
,
level_size
,
&
attr
);
if
(
!
r
)
{
if
(
r
==
0U
)
{
desc
=
mmap_desc
(
attr
,
desc
=
mmap_desc
(
attr
,
base_va
-
mm
->
base_va
+
mm
->
base_pa
,
base_va
-
mm
->
base_va
+
mm
->
base_pa
,
level
);
level
);
...
@@ -365,13 +372,15 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
...
@@ -365,13 +372,15 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
if
(
desc
==
UNSET_DESC
)
{
if
(
desc
==
UNSET_DESC
)
{
/* Area not covered by a region so need finer table */
/* Area not covered by a region so need finer table */
uint64_t
*
new_table
=
xlat_tables
[
next_xlat
++
];
uint64_t
*
new_table
=
xlat_tables
[
next_xlat
];
next_xlat
++
;
assert
(
next_xlat
<=
MAX_XLAT_TABLES
);
assert
(
next_xlat
<=
MAX_XLAT_TABLES
);
desc
=
TABLE_DESC
|
(
uintptr_t
)
new_table
;
desc
=
TABLE_DESC
|
(
uintptr_t
)
new_table
;
/* Recurse to fill in new table */
/* Recurse to fill in new table */
mm
=
init_xlation_table_inner
(
mm
,
base_va
,
mm
=
init_xlation_table_inner
(
mm
,
base_va
,
new_table
,
level
+
1
);
new_table
,
level
+
1U
);
}
}
debug_print
(
"
\n
"
);
debug_print
(
"
\n
"
);
...
@@ -379,7 +388,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
...
@@ -379,7 +388,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
*
table
++
=
desc
;
*
table
++
=
desc
;
base_va
+=
level_size
;
base_va
+=
level_size
;
}
while
((
base_va
&
level_index_mask
)
&&
}
while
((
base_va
&
level_index_mask
)
&&
(
base_va
-
1
<
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
));
(
(
base_va
-
1
U
)
<
(
PLAT_VIRT_ADDR_SPACE_SIZE
-
1
U
)
));
return
mm
;
return
mm
;
}
}
...
@@ -388,15 +397,15 @@ void init_xlation_table(uintptr_t base_va, uint64_t *table,
...
@@ -388,15 +397,15 @@ void init_xlation_table(uintptr_t base_va, uint64_t *table,
unsigned
int
level
,
uintptr_t
*
max_va
,
unsigned
int
level
,
uintptr_t
*
max_va
,
unsigned
long
long
*
max_pa
)
unsigned
long
long
*
max_pa
)
{
{
int
el
=
xlat_arch_current_el
();
unsigned
int
el
=
xlat_arch_current_el
();
execute_never_mask
=
xlat_arch_get_xn_desc
(
el
);
execute_never_mask
=
xlat_arch_get_xn_desc
(
el
);
if
(
el
==
3
)
{
if
(
el
==
3
U
)
{
ap1_mask
=
LOWER_ATTRS
(
AP_ONE_VA_RANGE_RES1
);
ap1_mask
=
LOWER_ATTRS
(
AP_ONE_VA_RANGE_RES1
);
}
else
{
}
else
{
assert
(
el
==
1
);
assert
(
el
==
1
U
);
ap1_mask
=
0
;
ap1_mask
=
0
ULL
;
}
}
init_xlation_table_inner
(
mmap
,
base_va
,
table
,
level
);
init_xlation_table_inner
(
mmap
,
base_va
,
table
,
level
);
...
...
lib/xlat_tables/xlat_tables_private.h
View file @
2ee596c4
/*
/*
* Copyright (c) 2016-201
7
, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-201
8
, ARM Limited and Contributors. All rights reserved.
*
*
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_PRIVATE_H
__
#ifndef XLAT_TABLES_PRIVATE_H
#define
__
XLAT_TABLES_PRIVATE_H
__
#define XLAT_TABLES_PRIVATE_H
#include <cassert.h>
#include <cassert.h>
#include <platform_def.h>
#include <platform_def.h>
...
@@ -44,17 +44,17 @@ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(PLAT_PHY_ADDR_SPACE_SIZE),
...
@@ -44,17 +44,17 @@ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(PLAT_PHY_ADDR_SPACE_SIZE),
void
print_mmap
(
void
);
void
print_mmap
(
void
);
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
int
xlat_arch_current_el
(
void
);
unsigned
int
xlat_arch_current_el
(
void
);
/*
/*
* Returns the bit mask that has to be ORed to the rest of a translation table
* Returns the bit mask that has to be ORed to the rest of a translation table
* descriptor so that execution of code is prohibited at the given Exception
* descriptor so that execution of code is prohibited at the given Exception
* Level.
* Level.
*/
*/
uint64_t
xlat_arch_get_xn_desc
(
int
el
);
uint64_t
xlat_arch_get_xn_desc
(
unsigned
int
el
);
void
init_xlation_table
(
uintptr_t
base_va
,
uint64_t
*
table
,
void
init_xlation_table
(
uintptr_t
base_va
,
uint64_t
*
table
,
unsigned
int
level
,
uintptr_t
*
max_va
,
unsigned
int
level
,
uintptr_t
*
max_va
,
unsigned
long
long
*
max_pa
);
unsigned
long
long
*
max_pa
);
#endif
/*
__
XLAT_TABLES_PRIVATE_H
__
*/
#endif
/* XLAT_TABLES_PRIVATE_H */
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
View file @
2ee596c4
...
@@ -14,7 +14,7 @@
...
@@ -14,7 +14,7 @@
#include <xlat_tables_v2.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#include "../xlat_tables_private.h"
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#if
(
ARM_ARCH_MAJOR == 7
)
&& !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#error ARMv7 target does not support LPAE MMU descriptors
#endif
#endif
...
@@ -27,12 +27,12 @@ int xlat_arch_is_granule_size_supported(size_t size)
...
@@ -27,12 +27,12 @@ int xlat_arch_is_granule_size_supported(size_t size)
* The library uses the long descriptor translation table format, which
* The library uses the long descriptor translation table format, which
* supports 4 KiB pages only.
* supports 4 KiB pages only.
*/
*/
return
(
size
==
(
4U
*
1024U
))
;
return
(
size
==
PAGE_SIZE_4KB
)
?
1
:
0
;
}
}
size_t
xlat_arch_get_max_supported_granule_size
(
void
)
size_t
xlat_arch_get_max_supported_granule_size
(
void
)
{
{
return
4U
*
1024U
;
return
PAGE_SIZE_4KB
;
}
}
#if ENABLE_ASSERTIONS
#if ENABLE_ASSERTIONS
...
@@ -90,7 +90,7 @@ void xlat_arch_tlbi_va_sync(void)
...
@@ -90,7 +90,7 @@ void xlat_arch_tlbi_va_sync(void)
isb
();
isb
();
}
}
int
xlat_arch_current_el
(
void
)
unsigned
int
xlat_arch_current_el
(
void
)
{
{
/*
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
...
@@ -100,7 +100,7 @@ int xlat_arch_current_el(void)
...
@@ -100,7 +100,7 @@ int xlat_arch_current_el(void)
* in AArch64 except for the XN bits, but we set and unset them at the
* in AArch64 except for the XN bits, but we set and unset them at the
* same time, so there's no difference in practice.
* same time, so there's no difference in practice.
*/
*/
return
1
;
return
1
U
;
}
}
/*******************************************************************************
/*******************************************************************************
...
@@ -143,20 +143,23 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
...
@@ -143,20 +143,23 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
* 32 bits.
* 32 bits.
*/
*/
if
(
max_va
!=
UINT32_MAX
)
{
if
(
max_va
!=
UINT32_MAX
)
{
uintptr_t
virtual_addr_space_size
=
max_va
+
1
;
uintptr_t
virtual_addr_space_size
=
max_va
+
1U
;
assert
(
CHECK_VIRT_ADDR_SPACE_SIZE
(
virtual_addr_space_size
));
assert
(
CHECK_VIRT_ADDR_SPACE_SIZE
(
virtual_addr_space_size
));
/*
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed
* __builtin_ctzll(0) is undefined but here we are guaranteed
* that virtual_addr_space_size is in the range [1, UINT32_MAX].
* that virtual_addr_space_size is in the range [1, UINT32_MAX].
*/
*/
ttbcr
|=
32
-
__builtin_ctzll
(
virtual_addr_space_size
);
int
t0sz
=
32
-
__builtin_ctzll
(
virtual_addr_space_size
);
ttbcr
|=
(
uint32_t
)
t0sz
;
}
}
/*
/*
* Set the cacheability and shareability attributes for memory
* Set the cacheability and shareability attributes for memory
* associated with translation table walks using TTBR0.
* associated with translation table walks using TTBR0.
*/
*/
if
(
flags
&
XLAT_TABLE_NC
)
{
if
(
(
flags
&
XLAT_TABLE_NC
)
!=
0U
)
{
/* Inner & outer non-cacheable non-shareable. */
/* Inner & outer non-cacheable non-shareable. */
ttbcr
|=
TTBCR_SH0_NON_SHAREABLE
|
TTBCR_RGN0_OUTER_NC
|
ttbcr
|=
TTBCR_SH0_NON_SHAREABLE
|
TTBCR_RGN0_OUTER_NC
|
TTBCR_RGN0_INNER_NC
;
TTBCR_RGN0_INNER_NC
;
...
...
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
View file @
2ee596c4
...
@@ -20,58 +20,58 @@ int xlat_arch_is_granule_size_supported(size_t size)
...
@@ -20,58 +20,58 @@ int xlat_arch_is_granule_size_supported(size_t size)
{
{
u_register_t
id_aa64mmfr0_el1
=
read_id_aa64mmfr0_el1
();
u_register_t
id_aa64mmfr0_el1
=
read_id_aa64mmfr0_el1
();
if
(
size
==
(
4U
*
1024U
)
)
{
if
(
size
==
PAGE_SIZE_4KB
)
{
return
((
id_aa64mmfr0_el1
>>
ID_AA64MMFR0_EL1_TGRAN4_SHIFT
)
&
return
((
(
id_aa64mmfr0_el1
>>
ID_AA64MMFR0_EL1_TGRAN4_SHIFT
)
&
ID_AA64MMFR0_EL1_TGRAN4_MASK
)
==
ID_AA64MMFR0_EL1_TGRAN4_MASK
)
==
ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED
;
ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED
)
?
1
:
0
;
}
else
if
(
size
==
(
16U
*
1024U
)
)
{
}
else
if
(
size
==
PAGE_SIZE_16KB
)
{
return
((
id_aa64mmfr0_el1
>>
ID_AA64MMFR0_EL1_TGRAN16_SHIFT
)
&
return
((
(
id_aa64mmfr0_el1
>>
ID_AA64MMFR0_EL1_TGRAN16_SHIFT
)
&
ID_AA64MMFR0_EL1_TGRAN16_MASK
)
==
ID_AA64MMFR0_EL1_TGRAN16_MASK
)
==
ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED
;
ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED
)
?
1
:
0
;
}
else
if
(
size
==
(
64U
*
1024U
)
)
{
}
else
if
(
size
==
PAGE_SIZE_64KB
)
{
return
((
id_aa64mmfr0_el1
>>
ID_AA64MMFR0_EL1_TGRAN64_SHIFT
)
&
return
((
(
id_aa64mmfr0_el1
>>
ID_AA64MMFR0_EL1_TGRAN64_SHIFT
)
&
ID_AA64MMFR0_EL1_TGRAN64_MASK
)
==
ID_AA64MMFR0_EL1_TGRAN64_MASK
)
==
ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED
;
ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED
)
?
1
:
0
;
}
else
{
return
0
;
}
}
return
0
;
}
}
size_t
xlat_arch_get_max_supported_granule_size
(
void
)
size_t
xlat_arch_get_max_supported_granule_size
(
void
)
{
{
if
(
xlat_arch_is_granule_size_supported
(
64U
*
1024U
)
)
{
if
(
xlat_arch_is_granule_size_supported
(
PAGE_SIZE_64KB
)
!=
0
)
{
return
64U
*
1024U
;
return
PAGE_SIZE_64KB
;
}
else
if
(
xlat_arch_is_granule_size_supported
(
16U
*
1024U
)
)
{
}
else
if
(
xlat_arch_is_granule_size_supported
(
PAGE_SIZE_16KB
)
!=
0
)
{
return
16U
*
1024U
;
return
PAGE_SIZE_16KB
;
}
else
{
}
else
{
assert
(
xlat_arch_is_granule_size_supported
(
4U
*
1024U
)
);
assert
(
xlat_arch_is_granule_size_supported
(
PAGE_SIZE_4KB
)
!=
0
);
return
4U
*
1024U
;
return
PAGE_SIZE_4KB
;
}
}
}
}
unsigned
long
long
tcr_physical_addr_size_bits
(
unsigned
long
long
max_addr
)
unsigned
long
long
tcr_physical_addr_size_bits
(
unsigned
long
long
max_addr
)
{
{
/* Physical address can't exceed 48 bits */
/* Physical address can't exceed 48 bits */
assert
((
max_addr
&
ADDR_MASK_48_TO_63
)
==
0
);
assert
((
max_addr
&
ADDR_MASK_48_TO_63
)
==
0
U
);
/* 48 bits address */
/* 48 bits address */
if
(
max_addr
&
ADDR_MASK_44_TO_47
)
if
(
(
max_addr
&
ADDR_MASK_44_TO_47
)
!=
0U
)
return
TCR_PS_BITS_256TB
;
return
TCR_PS_BITS_256TB
;
/* 44 bits address */
/* 44 bits address */
if
(
max_addr
&
ADDR_MASK_42_TO_43
)
if
(
(
max_addr
&
ADDR_MASK_42_TO_43
)
!=
0U
)
return
TCR_PS_BITS_16TB
;
return
TCR_PS_BITS_16TB
;
/* 42 bits address */
/* 42 bits address */
if
(
max_addr
&
ADDR_MASK_40_TO_41
)
if
(
(
max_addr
&
ADDR_MASK_40_TO_41
)
!=
0U
)
return
TCR_PS_BITS_4TB
;
return
TCR_PS_BITS_4TB
;
/* 40 bits address */
/* 40 bits address */
if
(
max_addr
&
ADDR_MASK_36_TO_39
)
if
(
(
max_addr
&
ADDR_MASK_36_TO_39
)
!=
0U
)
return
TCR_PS_BITS_1TB
;
return
TCR_PS_BITS_1TB
;
/* 36 bits address */
/* 36 bits address */
if
(
max_addr
&
ADDR_MASK_32_TO_35
)
if
(
(
max_addr
&
ADDR_MASK_32_TO_35
)
!=
0U
)
return
TCR_PS_BITS_64GB
;
return
TCR_PS_BITS_64GB
;
return
TCR_PS_BITS_4GB
;
return
TCR_PS_BITS_4GB
;
...
@@ -102,12 +102,12 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
...
@@ -102,12 +102,12 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
int
is_mmu_enabled_ctx
(
const
xlat_ctx_t
*
ctx
)
int
is_mmu_enabled_ctx
(
const
xlat_ctx_t
*
ctx
)
{
{
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
assert
(
xlat_arch_current_el
()
>=
1
);
assert
(
xlat_arch_current_el
()
>=
1
U
);
return
(
read_sctlr_el1
()
&
SCTLR_M_BIT
)
!=
0
;
return
(
(
read_sctlr_el1
()
&
SCTLR_M_BIT
)
!=
0U
)
?
1
:
0
;
}
else
{
}
else
{
assert
(
ctx
->
xlat_regime
==
EL3_REGIME
);
assert
(
ctx
->
xlat_regime
==
EL3_REGIME
);
assert
(
xlat_arch_current_el
()
>=
3
);
assert
(
xlat_arch_current_el
()
>=
3
U
);
return
(
read_sctlr_el3
()
&
SCTLR_M_BIT
)
!=
0
;
return
(
(
read_sctlr_el3
()
&
SCTLR_M_BIT
)
!=
0U
)
?
1
:
0
;
}
}
}
}
...
@@ -137,11 +137,11 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
...
@@ -137,11 +137,11 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
* exception level (see section D4.9.2 of the ARM ARM rev B.a).
* exception level (see section D4.9.2 of the ARM ARM rev B.a).
*/
*/
if
(
xlat_regime
==
EL1_EL0_REGIME
)
{
if
(
xlat_regime
==
EL1_EL0_REGIME
)
{
assert
(
xlat_arch_current_el
()
>=
1
);
assert
(
xlat_arch_current_el
()
>=
1
U
);
tlbivaae1is
(
TLBI_ADDR
(
va
));
tlbivaae1is
(
TLBI_ADDR
(
va
));
}
else
{
}
else
{
assert
(
xlat_regime
==
EL3_REGIME
);
assert
(
xlat_regime
==
EL3_REGIME
);
assert
(
xlat_arch_current_el
()
>=
3
);
assert
(
xlat_arch_current_el
()
>=
3
U
);
tlbivae3is
(
TLBI_ADDR
(
va
));
tlbivae3is
(
TLBI_ADDR
(
va
));
}
}
}
}
...
@@ -169,11 +169,11 @@ void xlat_arch_tlbi_va_sync(void)
...
@@ -169,11 +169,11 @@ void xlat_arch_tlbi_va_sync(void)
isb
();
isb
();
}
}
/*
 * Return the current Exception Level read from CurrentEL.
 *
 * The returned value is always 1 or higher: this code never runs at EL0.
 */
unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}
...
@@ -194,22 +194,24 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
...
@@ -194,22 +194,24 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
* Limit the input address ranges and memory region sizes translated
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
* using TTBR0 to the given virtual address space size.
*/
*/
assert
(
max_va
<
((
uint64_t
)
UINTPTR_MAX
));
assert
(
max_va
<
((
uint64_t
)
UINTPTR_MAX
));
virtual_addr_space_size
=
max_va
+
1
;
virtual_addr_space_size
=
(
uintptr_t
)
max_va
+
1
U
;
assert
(
CHECK_VIRT_ADDR_SPACE_SIZE
(
virtual_addr_space_size
));
assert
(
CHECK_VIRT_ADDR_SPACE_SIZE
(
virtual_addr_space_size
));
/*
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
*/
tcr
=
(
uint64_t
)
64
-
__builtin_ctzll
(
virtual_addr_space_size
);
int
t0sz
=
64
-
__builtin_ctzll
(
virtual_addr_space_size
);
tcr
=
(
uint64_t
)
t0sz
;
/*
/*
* Set the cacheability and shareability attributes for memory
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
* associated with translation table walks.
*/
*/
if
((
flags
&
XLAT_TABLE_NC
)
!=
0
)
{
if
((
flags
&
XLAT_TABLE_NC
)
!=
0
U
)
{
/* Inner & outer non-cacheable non-shareable. */
/* Inner & outer non-cacheable non-shareable. */
tcr
|=
TCR_SH_NON_SHAREABLE
|
tcr
|=
TCR_SH_NON_SHAREABLE
|
TCR_RGN_OUTER_NC
|
TCR_RGN_INNER_NC
;
TCR_RGN_OUTER_NC
|
TCR_RGN_INNER_NC
;
...
...
lib/xlat_tables_v2/xlat_tables_context.c
View file @
2ee596c4
...
@@ -78,12 +78,12 @@ void init_xlat_tables(void)
...
@@ -78,12 +78,12 @@ void init_xlat_tables(void)
{
{
assert
(
tf_xlat_ctx
.
xlat_regime
==
EL_REGIME_INVALID
);
assert
(
tf_xlat_ctx
.
xlat_regime
==
EL_REGIME_INVALID
);
int
current_el
=
xlat_arch_current_el
();
unsigned
int
current_el
=
xlat_arch_current_el
();
if
(
current_el
==
1
)
{
if
(
current_el
==
1
U
)
{
tf_xlat_ctx
.
xlat_regime
=
EL1_EL0_REGIME
;
tf_xlat_ctx
.
xlat_regime
=
EL1_EL0_REGIME
;
}
else
{
}
else
{
assert
(
current_el
==
3
);
assert
(
current_el
==
3
U
);
tf_xlat_ctx
.
xlat_regime
=
EL3_REGIME
;
tf_xlat_ctx
.
xlat_regime
=
EL3_REGIME
;
}
}
...
...
lib/xlat_tables_v2/xlat_tables_core.c
View file @
2ee596c4
This diff is collapsed.
Click to expand it.
lib/xlat_tables_v2/xlat_tables_private.h
View file @
2ee596c4
...
@@ -4,8 +4,8 @@
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-License-Identifier: BSD-3-Clause
*/
*/
#ifndef
__
XLAT_TABLES_PRIVATE_H
__
#ifndef XLAT_TABLES_PRIVATE_H
#define
__
XLAT_TABLES_PRIVATE_H
__
#define XLAT_TABLES_PRIVATE_H
#include <platform_def.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_defs.h>
...
@@ -35,6 +35,8 @@
...
@@ -35,6 +35,8 @@
#endif
/* PLAT_XLAT_TABLES_DYNAMIC */
#endif
/* PLAT_XLAT_TABLES_DYNAMIC */
extern
uint64_t
mmu_cfg_params
[
MMU_CFG_PARAM_MAX
];
/*
/*
* Return the execute-never mask that will prevent instruction fetch at the
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
* given translation regime.
...
@@ -61,7 +63,7 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
...
@@ -61,7 +63,7 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
void
xlat_arch_tlbi_va_sync
(
void
);
void
xlat_arch_tlbi_va_sync
(
void
);
/* Print VA, PA, size and attributes of all regions in the mmap array. */
/* Print VA, PA, size and attributes of all regions in the mmap array. */
void
xlat_mmap_print
(
mmap_region_t
*
const
mmap
);
void
xlat_mmap_print
(
const
mmap_region_t
*
mmap
);
/*
/*
* Print the current state of the translation tables by reading them from
* Print the current state of the translation tables by reading them from
...
@@ -73,14 +75,14 @@ void xlat_tables_print(xlat_ctx_t *ctx);
...
@@ -73,14 +75,14 @@ void xlat_tables_print(xlat_ctx_t *ctx);
* Returns a block/page table descriptor for the given level and attributes.
* Returns a block/page table descriptor for the given level and attributes.
*/
*/
uint64_t
xlat_desc
(
const
xlat_ctx_t
*
ctx
,
uint32_t
attr
,
uint64_t
xlat_desc
(
const
xlat_ctx_t
*
ctx
,
uint32_t
attr
,
unsigned
long
long
addr_pa
,
int
level
);
unsigned
long
long
addr_pa
,
unsigned
int
level
);
/*
/*
* Architecture-specific initialization code.
* Architecture-specific initialization code.
*/
*/
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
int
xlat_arch_current_el
(
void
);
unsigned
int
xlat_arch_current_el
(
void
);
/*
/*
* Return the maximum physical address supported by the hardware.
* Return the maximum physical address supported by the hardware.
...
@@ -94,4 +96,4 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
...
@@ -94,4 +96,4 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
*/
*/
int
is_mmu_enabled_ctx
(
const
xlat_ctx_t
*
ctx
);
int
is_mmu_enabled_ctx
(
const
xlat_ctx_t
*
ctx
);
#endif
/*
__
XLAT_TABLES_PRIVATE_H
__
*/
#endif
/* XLAT_TABLES_PRIVATE_H */
lib/xlat_tables_v2/xlat_tables_utils.c
View file @
2ee596c4
...
@@ -18,7 +18,7 @@
...
@@ -18,7 +18,7 @@
#if LOG_LEVEL < LOG_LEVEL_VERBOSE
#if LOG_LEVEL < LOG_LEVEL_VERBOSE
/*
 * Stub used when LOG_LEVEL < LOG_LEVEL_VERBOSE: mmap region printing is
 * compiled out, so this accepts and ignores its argument.
 */
void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
	/* Empty */
}
...
@@ -30,7 +30,7 @@ void xlat_tables_print(__unused xlat_ctx_t *ctx)
...
@@ -30,7 +30,7 @@ void xlat_tables_print(__unused xlat_ctx_t *ctx)
#else
/* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
#else
/* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
void
xlat_mmap_print
(
mmap_region_t
*
const
mmap
)
void
xlat_mmap_print
(
const
mmap_region_t
*
mmap
)
{
{
tf_printf
(
"mmap:
\n
"
);
tf_printf
(
"mmap:
\n
"
);
const
mmap_region_t
*
mm
=
mmap
;
const
mmap_region_t
*
mm
=
mmap
;
...
@@ -47,7 +47,7 @@ void xlat_mmap_print(mmap_region_t *const mmap)
...
@@ -47,7 +47,7 @@ void xlat_mmap_print(mmap_region_t *const mmap)
/* Print the attributes of the specified block descriptor. */
/* Print the attributes of the specified block descriptor. */
static
void
xlat_desc_print
(
const
xlat_ctx_t
*
ctx
,
uint64_t
desc
)
static
void
xlat_desc_print
(
const
xlat_ctx_t
*
ctx
,
uint64_t
desc
)
{
{
int
mem_type_index
=
ATTR_INDEX_GET
(
desc
);
u
int
64_t
mem_type_index
=
ATTR_INDEX_GET
(
desc
);
int
xlat_regime
=
ctx
->
xlat_regime
;
int
xlat_regime
=
ctx
->
xlat_regime
;
if
(
mem_type_index
==
ATTR_IWBWA_OWBWA_NTR_INDEX
)
{
if
(
mem_type_index
==
ATTR_IWBWA_OWBWA_NTR_INDEX
)
{
...
@@ -61,8 +61,8 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
...
@@ -61,8 +61,8 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
if
(
xlat_regime
==
EL3_REGIME
)
{
if
(
xlat_regime
==
EL3_REGIME
)
{
/* For EL3 only check the AP[2] and XN bits. */
/* For EL3 only check the AP[2] and XN bits. */
tf_printf
((
desc
&
LOWER_ATTRS
(
AP_RO
))
?
"-RO"
:
"-RW"
);
tf_printf
((
(
desc
&
LOWER_ATTRS
(
AP_RO
))
!=
0ULL
)
?
"-RO"
:
"-RW"
);
tf_printf
((
desc
&
UPPER_ATTRS
(
XN
))
?
"-XN"
:
"-EXEC"
);
tf_printf
((
(
desc
&
UPPER_ATTRS
(
XN
))
!=
0ULL
)
?
"-XN"
:
"-EXEC"
);
}
else
{
}
else
{
assert
(
xlat_regime
==
EL1_EL0_REGIME
);
assert
(
xlat_regime
==
EL1_EL0_REGIME
);
/*
/*
...
@@ -80,18 +80,18 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
...
@@ -80,18 +80,18 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
assert
((
xn_perm
==
xn_mask
)
||
(
xn_perm
==
0ULL
));
assert
((
xn_perm
==
xn_mask
)
||
(
xn_perm
==
0ULL
));
#endif
#endif
tf_printf
((
desc
&
LOWER_ATTRS
(
AP_RO
))
?
"-RO"
:
"-RW"
);
tf_printf
((
(
desc
&
LOWER_ATTRS
(
AP_RO
))
!=
0ULL
)
?
"-RO"
:
"-RW"
);
/* Only check one of PXN and UXN, the other one is the same. */
/* Only check one of PXN and UXN, the other one is the same. */
tf_printf
((
desc
&
UPPER_ATTRS
(
PXN
))
?
"-XN"
:
"-EXEC"
);
tf_printf
((
(
desc
&
UPPER_ATTRS
(
PXN
))
!=
0ULL
)
?
"-XN"
:
"-EXEC"
);
/*
/*
* Privileged regions can only be accessed from EL1, user
* Privileged regions can only be accessed from EL1, user
* regions can be accessed from EL1 and EL0.
* regions can be accessed from EL1 and EL0.
*/
*/
tf_printf
((
desc
&
LOWER_ATTRS
(
AP_ACCESS_UNPRIVILEGED
))
tf_printf
((
(
desc
&
LOWER_ATTRS
(
AP_ACCESS_UNPRIVILEGED
))
!=
0ULL
)
?
"-USER"
:
"-PRIV"
);
?
"-USER"
:
"-PRIV"
);
}
}
tf_printf
(
LOWER_ATTRS
(
NS
)
&
desc
?
"-NS"
:
"-S"
);
tf_printf
(
((
LOWER_ATTRS
(
NS
)
&
desc
)
!=
0ULL
)
?
"-NS"
:
"-S"
);
}
}
static
const
char
*
const
level_spacers
[]
=
{
static
const
char
*
const
level_spacers
[]
=
{
...
@@ -108,17 +108,15 @@ static const char *invalid_descriptors_ommited =
...
@@ -108,17 +108,15 @@ static const char *invalid_descriptors_ommited =
* Recursive function that reads the translation tables passed as an argument
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
* and prints their status.
*/
*/
static
void
xlat_tables_print_internal
(
xlat_ctx_t
*
ctx
,
static
void
xlat_tables_print_internal
(
xlat_ctx_t
*
ctx
,
uintptr_t
table_base_va
,
const
uintptr_t
table_base_va
,
const
uint64_t
*
table_base
,
unsigned
int
table_entries
,
uint64_t
*
const
table_base
,
const
int
table_entries
,
unsigned
int
level
)
const
unsigned
int
level
)
{
{
assert
(
level
<=
XLAT_TABLE_LEVEL_MAX
);
assert
(
level
<=
XLAT_TABLE_LEVEL_MAX
);
uint64_t
desc
;
uint64_t
desc
;
uintptr_t
table_idx_va
=
table_base_va
;
uintptr_t
table_idx_va
=
table_base_va
;
int
table_idx
=
0
;
unsigned
int
table_idx
=
0U
;
size_t
level_size
=
XLAT_BLOCK_SIZE
(
level
);
size_t
level_size
=
XLAT_BLOCK_SIZE
(
level
);
/*
/*
...
@@ -136,9 +134,9 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
...
@@ -136,9 +134,9 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
if
((
desc
&
DESC_MASK
)
==
INVALID_DESC
)
{
if
((
desc
&
DESC_MASK
)
==
INVALID_DESC
)
{
if
(
invalid_row_count
==
0
)
{
if
(
invalid_row_count
==
0
)
{
tf_printf
(
"%sVA:
%p
size:0x%zx
\n
"
,
tf_printf
(
"%sVA:
0x%lx
size:0x%zx
\n
"
,
level_spacers
[
level
],
level_spacers
[
level
],
(
void
*
)
table_idx_va
,
level_size
);
table_idx_va
,
level_size
);
}
}
invalid_row_count
++
;
invalid_row_count
++
;
...
@@ -164,20 +162,20 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
...
@@ -164,20 +162,20 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
* but instead points to the next translation
* but instead points to the next translation
* table in the translation table walk.
* table in the translation table walk.
*/
*/
tf_printf
(
"%sVA:
%p
size:0x%zx
\n
"
,
tf_printf
(
"%sVA:
0x%lx
size:0x%zx
\n
"
,
level_spacers
[
level
],
level_spacers
[
level
],
(
void
*
)
table_idx_va
,
level_size
);
table_idx_va
,
level_size
);
uintptr_t
addr_inner
=
desc
&
TABLE_ADDR_MASK
;
uintptr_t
addr_inner
=
desc
&
TABLE_ADDR_MASK
;
xlat_tables_print_internal
(
ctx
,
table_idx_va
,
xlat_tables_print_internal
(
ctx
,
table_idx_va
,
(
uint64_t
*
)
addr_inner
,
(
uint64_t
*
)
addr_inner
,
XLAT_TABLE_ENTRIES
,
level
+
1
);
XLAT_TABLE_ENTRIES
,
level
+
1
U
);
}
else
{
}
else
{
tf_printf
(
"%sVA:
%p
PA:0x%llx size:0x%zx "
,
tf_printf
(
"%sVA:
0x%lx
PA:0x%llx size:0x%zx "
,
level_spacers
[
level
],
level_spacers
[
level
],
(
void
*
)
table_idx_va
,
table_idx_va
,
(
u
nsigned
long
long
)(
desc
&
TABLE_ADDR_MASK
),
(
u
int64_t
)(
desc
&
TABLE_ADDR_MASK
),
level_size
);
level_size
);
xlat_desc_print
(
ctx
,
desc
);
xlat_desc_print
(
ctx
,
desc
);
tf_printf
(
"
\n
"
);
tf_printf
(
"
\n
"
);
...
@@ -197,6 +195,8 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
...
@@ -197,6 +195,8 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
void
xlat_tables_print
(
xlat_ctx_t
*
ctx
)
void
xlat_tables_print
(
xlat_ctx_t
*
ctx
)
{
{
const
char
*
xlat_regime_str
;
const
char
*
xlat_regime_str
;
int
used_page_tables
;
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
xlat_regime_str
=
"1&0"
;
xlat_regime_str
=
"1&0"
;
}
else
{
}
else
{
...
@@ -206,29 +206,28 @@ void xlat_tables_print(xlat_ctx_t *ctx)
...
@@ -206,29 +206,28 @@ void xlat_tables_print(xlat_ctx_t *ctx)
VERBOSE
(
"Translation tables state:
\n
"
);
VERBOSE
(
"Translation tables state:
\n
"
);
VERBOSE
(
" Xlat regime: EL%s
\n
"
,
xlat_regime_str
);
VERBOSE
(
" Xlat regime: EL%s
\n
"
,
xlat_regime_str
);
VERBOSE
(
" Max allowed PA: 0x%llx
\n
"
,
ctx
->
pa_max_address
);
VERBOSE
(
" Max allowed PA: 0x%llx
\n
"
,
ctx
->
pa_max_address
);
VERBOSE
(
" Max allowed VA:
%p
\n
"
,
(
void
*
)
ctx
->
va_max_address
);
VERBOSE
(
" Max allowed VA:
0x%lx
\n
"
,
ctx
->
va_max_address
);
VERBOSE
(
" Max mapped PA: 0x%llx
\n
"
,
ctx
->
max_pa
);
VERBOSE
(
" Max mapped PA: 0x%llx
\n
"
,
ctx
->
max_pa
);
VERBOSE
(
" Max mapped VA:
%p
\n
"
,
(
void
*
)
ctx
->
max_va
);
VERBOSE
(
" Max mapped VA:
0x%lx
\n
"
,
ctx
->
max_va
);
VERBOSE
(
" Initial lookup level: %
i
\n
"
,
ctx
->
base_level
);
VERBOSE
(
" Initial lookup level: %
u
\n
"
,
ctx
->
base_level
);
VERBOSE
(
" Entries @initial lookup level: %
i
\n
"
,
VERBOSE
(
" Entries @initial lookup level: %
u
\n
"
,
ctx
->
base_table_entries
);
ctx
->
base_table_entries
);
int
used_page_tables
;
#if PLAT_XLAT_TABLES_DYNAMIC
#if PLAT_XLAT_TABLES_DYNAMIC
used_page_tables
=
0
;
used_page_tables
=
0
;
for
(
unsigned
int
i
=
0
;
i
<
ctx
->
tables_num
;
++
i
)
{
for
(
int
i
=
0
;
i
<
ctx
->
tables_num
;
++
i
)
{
if
(
ctx
->
tables_mapped_regions
[
i
]
!=
0
)
if
(
ctx
->
tables_mapped_regions
[
i
]
!=
0
)
++
used_page_tables
;
++
used_page_tables
;
}
}
#else
#else
used_page_tables
=
ctx
->
next_table
;
used_page_tables
=
ctx
->
next_table
;
#endif
#endif
VERBOSE
(
" Used %
i
sub-tables out of %
i
(spare: %
i
)
\n
"
,
VERBOSE
(
" Used %
d
sub-tables out of %
d
(spare: %
d
)
\n
"
,
used_page_tables
,
ctx
->
tables_num
,
used_page_tables
,
ctx
->
tables_num
,
ctx
->
tables_num
-
used_page_tables
);
ctx
->
tables_num
-
used_page_tables
);
xlat_tables_print_internal
(
ctx
,
0
,
ctx
->
base_table
,
xlat_tables_print_internal
(
ctx
,
0
U
,
ctx
->
base_table
,
ctx
->
base_table_entries
,
ctx
->
base_level
);
ctx
->
base_table_entries
,
ctx
->
base_level
);
}
}
...
@@ -251,13 +250,13 @@ void xlat_tables_print(xlat_ctx_t *ctx)
...
@@ -251,13 +250,13 @@ void xlat_tables_print(xlat_ctx_t *ctx)
*/
*/
static
uint64_t
*
find_xlat_table_entry
(
uintptr_t
virtual_addr
,
static
uint64_t
*
find_xlat_table_entry
(
uintptr_t
virtual_addr
,
void
*
xlat_table_base
,
void
*
xlat_table_base
,
int
xlat_table_base_entries
,
unsigned
int
xlat_table_base_entries
,
unsigned
long
long
virt_addr_space_size
,
unsigned
long
long
virt_addr_space_size
,
int
*
out_level
)
unsigned
int
*
out_level
)
{
{
unsigned
int
start_level
;
unsigned
int
start_level
;
uint64_t
*
table
;
uint64_t
*
table
;
int
entries
;
unsigned
int
entries
;
start_level
=
GET_XLAT_TABLE_LEVEL_BASE
(
virt_addr_space_size
);
start_level
=
GET_XLAT_TABLE_LEVEL_BASE
(
virt_addr_space_size
);
...
@@ -267,9 +266,7 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
...
@@ -267,9 +266,7 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
for
(
unsigned
int
level
=
start_level
;
for
(
unsigned
int
level
=
start_level
;
level
<=
XLAT_TABLE_LEVEL_MAX
;
level
<=
XLAT_TABLE_LEVEL_MAX
;
++
level
)
{
++
level
)
{
int
idx
;
uint64_t
idx
,
desc
,
desc_type
;
uint64_t
desc
;
uint64_t
desc_type
;
idx
=
XLAT_TABLE_IDX
(
virtual_addr
,
level
);
idx
=
XLAT_TABLE_IDX
(
virtual_addr
,
level
);
if
(
idx
>=
entries
)
{
if
(
idx
>=
entries
)
{
...
@@ -318,22 +315,23 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
...
@@ -318,22 +315,23 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
static
int
get_mem_attributes_internal
(
const
xlat_ctx_t
*
ctx
,
uintptr_t
base_va
,
static
int
get_mem_attributes_internal
(
const
xlat_ctx_t
*
ctx
,
uintptr_t
base_va
,
uint32_t
*
attributes
,
uint64_t
**
table_entry
,
uint32_t
*
attributes
,
uint64_t
**
table_entry
,
unsigned
long
long
*
addr_pa
,
int
*
table_level
)
unsigned
long
long
*
addr_pa
,
unsigned
int
*
table_level
)
{
{
uint64_t
*
entry
;
uint64_t
*
entry
;
uint64_t
desc
;
uint64_t
desc
;
int
level
;
unsigned
int
level
;
unsigned
long
long
virt_addr_space_size
;
unsigned
long
long
virt_addr_space_size
;
/*
/*
* Sanity-check arguments.
* Sanity-check arguments.
*/
*/
assert
(
ctx
!=
NULL
);
assert
(
ctx
!=
NULL
);
assert
(
ctx
->
initialized
);
assert
(
ctx
->
initialized
!=
0
);
assert
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
||
ctx
->
xlat_regime
==
EL3_REGIME
);
assert
((
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
||
(
ctx
->
xlat_regime
==
EL3_REGIME
));
virt_addr_space_size
=
(
unsigned
long
long
)
ctx
->
va_max_address
+
1
;
virt_addr_space_size
=
(
unsigned
long
long
)
ctx
->
va_max_address
+
1
ULL
;
assert
(
virt_addr_space_size
>
0
);
assert
(
virt_addr_space_size
>
0
U
);
entry
=
find_xlat_table_entry
(
base_va
,
entry
=
find_xlat_table_entry
(
base_va
,
ctx
->
base_table
,
ctx
->
base_table
,
...
@@ -341,7 +339,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
...
@@ -341,7 +339,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
virt_addr_space_size
,
virt_addr_space_size
,
&
level
);
&
level
);
if
(
entry
==
NULL
)
{
if
(
entry
==
NULL
)
{
WARN
(
"Address
%p
is not mapped.
\n
"
,
(
void
*
)
base_va
);
WARN
(
"Address
0x%lx
is not mapped.
\n
"
,
base_va
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
...
@@ -366,9 +364,9 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
...
@@ -366,9 +364,9 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
#endif
/* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
#endif
/* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
assert
(
attributes
!=
NULL
);
assert
(
attributes
!=
NULL
);
*
attributes
=
0
;
*
attributes
=
0
U
;
int
attr_index
=
(
desc
>>
ATTR_INDEX_SHIFT
)
&
ATTR_INDEX_MASK
;
u
int
64_t
attr_index
=
(
desc
>>
ATTR_INDEX_SHIFT
)
&
ATTR_INDEX_MASK
;
if
(
attr_index
==
ATTR_IWBWA_OWBWA_NTR_INDEX
)
{
if
(
attr_index
==
ATTR_IWBWA_OWBWA_NTR_INDEX
)
{
*
attributes
|=
MT_MEMORY
;
*
attributes
|=
MT_MEMORY
;
...
@@ -379,20 +377,21 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
...
@@ -379,20 +377,21 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
*
attributes
|=
MT_DEVICE
;
*
attributes
|=
MT_DEVICE
;
}
}
int
ap2_bit
=
(
desc
>>
AP2_SHIFT
)
&
1
;
u
int
64_t
ap2_bit
=
(
desc
>>
AP2_SHIFT
)
&
1
U
;
if
(
ap2_bit
==
AP2_RW
)
if
(
ap2_bit
==
AP2_RW
)
*
attributes
|=
MT_RW
;
*
attributes
|=
MT_RW
;
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
if
(
ctx
->
xlat_regime
==
EL1_EL0_REGIME
)
{
int
ap1_bit
=
(
desc
>>
AP1_SHIFT
)
&
1
;
uint64_t
ap1_bit
=
(
desc
>>
AP1_SHIFT
)
&
1U
;
if
(
ap1_bit
==
AP1_ACCESS_UNPRIVILEGED
)
if
(
ap1_bit
==
AP1_ACCESS_UNPRIVILEGED
)
*
attributes
|=
MT_USER
;
*
attributes
|=
MT_USER
;
}
}
int
ns_bit
=
(
desc
>>
NS_SHIFT
)
&
1
;
u
int
64_t
ns_bit
=
(
desc
>>
NS_SHIFT
)
&
1
U
;
if
(
ns_bit
==
1
)
if
(
ns_bit
==
1
U
)
*
attributes
|=
MT_NS
;
*
attributes
|=
MT_NS
;
uint64_t
xn_mask
=
xlat_arch_regime_get_xn_desc
(
ctx
->
xlat_regime
);
uint64_t
xn_mask
=
xlat_arch_regime_get_xn_desc
(
ctx
->
xlat_regime
);
...
@@ -400,7 +399,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
...
@@ -400,7 +399,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
if
((
desc
&
xn_mask
)
==
xn_mask
)
{
if
((
desc
&
xn_mask
)
==
xn_mask
)
{
*
attributes
|=
MT_EXECUTE_NEVER
;
*
attributes
|=
MT_EXECUTE_NEVER
;
}
else
{
}
else
{
assert
((
desc
&
xn_mask
)
==
0
);
assert
((
desc
&
xn_mask
)
==
0
U
);
}
}
return
0
;
return
0
;
...
@@ -415,7 +414,7 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
...
@@ -415,7 +414,7 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
}
}
int
change_mem_attributes
(
xlat_ctx_t
*
ctx
,
int
change_mem_attributes
(
const
xlat_ctx_t
*
ctx
,
uintptr_t
base_va
,
uintptr_t
base_va
,
size_t
size
,
size_t
size
,
uint32_t
attr
)
uint32_t
attr
)
...
@@ -423,49 +422,49 @@ int change_mem_attributes(xlat_ctx_t *ctx,
...
@@ -423,49 +422,49 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/* Note: This implementation isn't optimized. */
/* Note: This implementation isn't optimized. */
assert
(
ctx
!=
NULL
);
assert
(
ctx
!=
NULL
);
assert
(
ctx
->
initialized
);
assert
(
ctx
->
initialized
!=
0
);
unsigned
long
long
virt_addr_space_size
=
unsigned
long
long
virt_addr_space_size
=
(
unsigned
long
long
)
ctx
->
va_max_address
+
1
;
(
unsigned
long
long
)
ctx
->
va_max_address
+
1
U
;
assert
(
virt_addr_space_size
>
0
);
assert
(
virt_addr_space_size
>
0
U
);
if
(
!
IS_PAGE_ALIGNED
(
base_va
))
{
if
(
!
IS_PAGE_ALIGNED
(
base_va
))
{
WARN
(
"%s: Address
%p
is not aligned on a page boundary.
\n
"
,
WARN
(
"%s: Address
0x%lx
is not aligned on a page boundary.
\n
"
,
__func__
,
(
void
*
)
base_va
);
__func__
,
base_va
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
if
(
size
==
0
)
{
if
(
size
==
0
U
)
{
WARN
(
"%s: Size is 0.
\n
"
,
__func__
);
WARN
(
"%s: Size is 0.
\n
"
,
__func__
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
if
((
size
%
PAGE_SIZE
)
!=
0
)
{
if
((
size
%
PAGE_SIZE
)
!=
0
U
)
{
WARN
(
"%s: Size 0x%zx is not a multiple of a page size.
\n
"
,
WARN
(
"%s: Size 0x%zx is not a multiple of a page size.
\n
"
,
__func__
,
size
);
__func__
,
size
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
if
(((
attr
&
MT_EXECUTE_NEVER
)
==
0
)
&&
((
attr
&
MT_RW
)
!=
0
))
{
if
(((
attr
&
MT_EXECUTE_NEVER
)
==
0
U
)
&&
((
attr
&
MT_RW
)
!=
0
U
))
{
WARN
(
"%s: Mapping memory as read-write and executable not allowed.
\n
"
,
WARN
(
"%s: Mapping memory as read-write and executable not allowed.
\n
"
,
__func__
);
__func__
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
in
t
pages_count
=
size
/
PAGE_SIZE
;
size_
t
pages_count
=
size
/
PAGE_SIZE
;
VERBOSE
(
"Changing memory attributes of %
i
pages starting from address
%p
...
\n
"
,
VERBOSE
(
"Changing memory attributes of %
zu
pages starting from address
0x%lx
...
\n
"
,
pages_count
,
(
void
*
)
base_va
);
pages_count
,
base_va
);
uintptr_t
base_va_original
=
base_va
;
uintptr_t
base_va_original
=
base_va
;
/*
/*
* Sanity checks.
* Sanity checks.
*/
*/
for
(
in
t
i
=
0
;
i
<
pages_count
;
++
i
)
{
for
(
size_
t
i
=
0
U
;
i
<
pages_count
;
++
i
)
{
uint64_t
*
entry
;
const
uint64_t
*
entry
;
uint64_t
desc
;
uint64_t
desc
,
attr_index
;
int
level
;
unsigned
int
level
;
entry
=
find_xlat_table_entry
(
base_va
,
entry
=
find_xlat_table_entry
(
base_va
,
ctx
->
base_table
,
ctx
->
base_table
,
...
@@ -473,7 +472,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
...
@@ -473,7 +472,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
virt_addr_space_size
,
virt_addr_space_size
,
&
level
);
&
level
);
if
(
entry
==
NULL
)
{
if
(
entry
==
NULL
)
{
WARN
(
"Address
%p
is not mapped.
\n
"
,
(
void
*
)
base_va
);
WARN
(
"Address
0x%lx
is not mapped.
\n
"
,
base_va
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
...
@@ -485,8 +484,8 @@ int change_mem_attributes(xlat_ctx_t *ctx,
...
@@ -485,8 +484,8 @@ int change_mem_attributes(xlat_ctx_t *ctx,
*/
*/
if
(((
desc
&
DESC_MASK
)
!=
PAGE_DESC
)
||
if
(((
desc
&
DESC_MASK
)
!=
PAGE_DESC
)
||
(
level
!=
XLAT_TABLE_LEVEL_MAX
))
{
(
level
!=
XLAT_TABLE_LEVEL_MAX
))
{
WARN
(
"Address
%p
is not mapped at the right granularity.
\n
"
,
WARN
(
"Address
0x%lx
is not mapped at the right granularity.
\n
"
,
(
void
*
)
base_va
);
base_va
);
WARN
(
"Granularity is 0x%llx, should be 0x%x.
\n
"
,
WARN
(
"Granularity is 0x%llx, should be 0x%x.
\n
"
,
(
unsigned
long
long
)
XLAT_BLOCK_SIZE
(
level
),
PAGE_SIZE
);
(
unsigned
long
long
)
XLAT_BLOCK_SIZE
(
level
),
PAGE_SIZE
);
return
-
EINVAL
;
return
-
EINVAL
;
...
@@ -495,11 +494,11 @@ int change_mem_attributes(xlat_ctx_t *ctx,
...
@@ -495,11 +494,11 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/*
/*
* If the region type is device, it shouldn't be executable.
* If the region type is device, it shouldn't be executable.
*/
*/
int
attr_index
=
(
desc
>>
ATTR_INDEX_SHIFT
)
&
ATTR_INDEX_MASK
;
attr_index
=
(
desc
>>
ATTR_INDEX_SHIFT
)
&
ATTR_INDEX_MASK
;
if
(
attr_index
==
ATTR_DEVICE_INDEX
)
{
if
(
attr_index
==
ATTR_DEVICE_INDEX
)
{
if
((
attr
&
MT_EXECUTE_NEVER
)
==
0
)
{
if
((
attr
&
MT_EXECUTE_NEVER
)
==
0
U
)
{
WARN
(
"Setting device memory as executable at address
%p
."
,
WARN
(
"Setting device memory as executable at address
0x%lx
."
,
(
void
*
)
base_va
);
base_va
);
return
-
EINVAL
;
return
-
EINVAL
;
}
}
}
}
...
@@ -510,14 +509,14 @@ int change_mem_attributes(xlat_ctx_t *ctx,
...
@@ -510,14 +509,14 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/* Restore original value. */
/* Restore original value. */
base_va
=
base_va_original
;
base_va
=
base_va_original
;
for
(
int
i
=
0
;
i
<
pages_count
;
++
i
)
{
for
(
unsigned
int
i
=
0
U
;
i
<
pages_count
;
++
i
)
{
uint32_t
old_attr
,
new_attr
;
uint32_t
old_attr
=
0U
,
new_attr
;
uint64_t
*
entry
;
uint64_t
*
entry
=
NULL
;
int
level
;
unsigned
int
level
=
0U
;
unsigned
long
long
addr_pa
;
unsigned
long
long
addr_pa
=
0ULL
;
get_mem_attributes_internal
(
ctx
,
base_va
,
&
old_attr
,
(
void
)
get_mem_attributes_internal
(
ctx
,
base_va
,
&
old_attr
,
&
entry
,
&
addr_pa
,
&
level
);
&
entry
,
&
addr_pa
,
&
level
);
/*
/*
...
...
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment