Commit 29be1b55 (unverified)
Authored Aug 09, 2018 by Dimitris Papastamos; committed by GitHub on Aug 09, 2018

Merge pull request #1513 from antonio-nino-diaz-arm/an/xlat-caches

xlat v2: Cleanup and dcache coherency bug fix

Parents: 781842ea 3e318e40
Changes: 10 files
include/lib/xlat_tables/xlat_tables_v2.h
@@ -296,14 +296,15 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
  * translation tables are not modified by any other code while this function is
  * executing.
  */
-int change_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
-			  uint32_t attr);
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				   size_t size, uint32_t attr);
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr);

 /*
  * Query the memory attributes of a memory page in a set of translation tables.
  *
  * Return 0 on success, a negative error code on error.
- * On success, the attributes are stored into *attributes.
+ * On success, the attributes are stored into *attr.
  *
  * ctx
  *   Translation context to work on.
@@ -311,11 +312,12 @@ int change_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
  * Virtual address of the page to get the attributes of.
  * There are no alignment restrictions on this address. The attributes of the
  * memory page it lies within are returned.
- * attributes
+ * attr
  *   Output parameter where to store the attributes of the targeted memory page.
  */
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
-		       uint32_t *attributes);
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				uint32_t *attr);
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);

 #endif /*__ASSEMBLY__*/

 #endif /* XLAT_TABLES_V2_H */
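For context, a minimal sketch of how a caller might use the renamed public API on the default translation context (the one set up by init_xlat_tables()). The helper name, the chosen attribute values and the error handling below are illustrative assumptions, not code from this commit:

#include <errno.h>
#include <xlat_tables_v2.h>

/* Hypothetical helper: make one already-mapped page read-only and verify it. */
static int make_page_read_only(uintptr_t page_va)
{
	uint32_t attr;
	int rc;

	/* The no-suffix wrappers operate on the default context internally. */
	rc = xlat_change_mem_attributes(page_va, PAGE_SIZE,
					MT_MEMORY | MT_RO | MT_SECURE);
	if (rc != 0)
		return rc;		/* e.g. -EINVAL or -EPERM */

	rc = xlat_get_mem_attributes(page_va, &attr);
	if (rc != 0)
		return rc;

	/* MT_RW clear means the page is now read-only. */
	return ((attr & MT_RW) == 0U) ? 0 : -EPERM;
}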
lib/psci/aarch32/psci_helpers.S
@@ -91,28 +91,6 @@ func psci_do_pwrup_cache_maintenance
 	stcopr	r0, SCTLR
 	isb

-#if PLAT_XLAT_TABLES_DYNAMIC
-	/* ---------------------------------------------
-	 * During warm boot the MMU is enabled with data
-	 * cache disabled, then the interconnect is set
-	 * up and finally the data cache is enabled.
-	 *
-	 * During this period, if another CPU modifies
-	 * the translation tables, the MMU table walker
-	 * may read the old entries. This is only a
-	 * problem for dynamic regions, the warm boot
-	 * code isn't affected because it is static.
-	 *
-	 * Invalidate all TLB entries loaded while the
-	 * CPU wasn't coherent with the rest of the
-	 * system.
-	 * ---------------------------------------------
-	 */
-	stcopr	r0, TLBIALL
-	dsb	ish
-	isb
-#endif
-
 	pop	{r12, pc}
 endfunc	psci_do_pwrup_cache_maintenance
lib/psci/aarch64/psci_helpers.S
@@ -115,28 +115,6 @@ func psci_do_pwrup_cache_maintenance
 	msr	sctlr_el3, x0
 	isb

-#if PLAT_XLAT_TABLES_DYNAMIC
-	/* ---------------------------------------------
-	 * During warm boot the MMU is enabled with data
-	 * cache disabled, then the interconnect is set
-	 * up and finally the data cache is enabled.
-	 *
-	 * During this period, if another CPU modifies
-	 * the translation tables, the MMU table walker
-	 * may read the old entries. This is only a
-	 * problem for dynamic regions, the warm boot
-	 * code isn't affected because it is static.
-	 *
-	 * Invalidate all TLB entries loaded while the
-	 * CPU wasn't coherent with the rest of the
-	 * system.
-	 * ---------------------------------------------
-	 */
-	tlbi	alle3
-	dsb	ish
-	isb
-#endif
-
 	ldp	x29, x30, [sp], #16
 	ret
 endfunc	psci_do_pwrup_cache_maintenance
lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -48,6 +48,11 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
 	return (read_sctlr() & SCTLR_M_BIT) != 0;
 }

+bool is_dcache_enabled(void)
+{
+	return (read_sctlr() & SCTLR_C_BIT) != 0;
+}
+
 uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
 {
 	return UPPER_ATTRS(XN);
lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -112,6 +112,17 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 	}
 }

+bool is_dcache_enabled(void)
+{
+	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+	if (el == 1U) {
+		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+	} else {
+		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+	}
+}
+
 uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
 {
 	if (xlat_regime == EL1_EL0_REGIME) {
lib/xlat_tables_v2/xlat_tables_context.c
@@ -90,6 +90,16 @@ void init_xlat_tables(void)
 	init_xlat_tables_ctx(&tf_xlat_ctx);
 }

+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
+
 /*
  * If dynamic allocation of new regions is disabled then by the time we call the
  * function enabling the MMU, we'll have registered all the memory regions to
lib/xlat_tables_v2/xlat_tables_core.c
@@ -18,6 +18,13 @@
 #include "xlat_tables_private.h"

+/* Helper function that cleans the data cache only if it is enabled. */
+static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+	if (is_dcache_enabled())
+		clean_dcache_range(addr, size);
+}
+
 #if PLAT_XLAT_TABLES_DYNAMIC

 /*
@@ -329,7 +336,10 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			xlat_tables_unmap_region(ctx, mm, table_idx_va,
 						 subtable, XLAT_TABLE_ENTRIES,
 						 level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
 			/*
 			 * If the subtable is now empty, remove its reference.
 			 */
@@ -563,6 +573,10 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
 					       subtable, XLAT_TABLE_ENTRIES,
 					       level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
 			if (end_va !=
 				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
 				return end_va;
@@ -575,6 +589,10 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
 					       subtable, XLAT_TABLE_ENTRIES,
 					       level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
 			if (end_va !=
 				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
 				return end_va;
@@ -859,7 +877,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 		end_va = xlat_tables_map_region(ctx, mm_cursor, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+			ctx->base_table_entries * sizeof(uint64_t));
+#endif
 		/* Failed to map, remove mmap entry, unmap and return error. */
 		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
 			(void)memmove(mm_cursor, mm_cursor + 1U,
@@ -885,7 +906,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+				ctx->base_table_entries * sizeof(uint64_t));
+#endif
 			return -ENOMEM;
 		}
@@ -951,6 +975,10 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
 		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
 					 ctx->base_table_entries,
 					 ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+			ctx->base_table_entries * sizeof(uint64_t));
+#endif
 		xlat_arch_tlbi_va_sync();
 	}
@@ -1012,7 +1040,10 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+			ctx->base_table_entries * sizeof(uint64_t));
+#endif
 		if (end_va != (mm->base_va + mm->size - 1U)) {
 			ERROR("Not enough memory to map region:\n"
 			      " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
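The same guarded clean follows every write to a live translation table in this file; the #if means the maintenance code is only compiled in when neither HW_ASSISTED_COHERENCY nor WARMBOOT_ENABLE_DCACHE_EARLY is set, i.e. when the table walker may not be coherent with the data cache. Purely as an illustration (not part of this commit), the repeated pattern could be expressed once as a wrapper macro:

/*
 * Illustrative only: a hypothetical wrapper for the guard used above.
 * On cache-coherent configurations the call compiles to nothing.
 */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
#define XLAT_CLEAN_TABLE(table, entries)				\
	xlat_clean_dcache_range((uintptr_t)(table),			\
				(entries) * sizeof(uint64_t))
#else
#define XLAT_CLEAN_TABLE(table, entries)	((void)0)
#endif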
lib/xlat_tables_v2/xlat_tables_private.h
@@ -97,4 +97,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
  */
 bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);

+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
+
 #endif /* XLAT_TABLES_PRIVATE_H */
lib/xlat_tables_v2/xlat_tables_utils.c
@@ -314,8 +314,8 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
 }

-static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
-		uint32_t *attributes, uint64_t **table_entry,
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
 		unsigned long long *addr_pa, unsigned int *table_level)
 {
 	uint64_t *entry;
@@ -407,18 +407,16 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 }

-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
-		       uint32_t *attributes)
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				uint32_t *attr)
 {
-	return get_mem_attributes_internal(ctx, base_va, attributes,
-					   NULL, NULL, NULL);
+	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+						NULL, NULL, NULL);
 }

-int change_mem_attributes(const xlat_ctx_t *ctx,
-			  uintptr_t base_va,
-			  size_t size,
-			  uint32_t attr)
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				   size_t size, uint32_t attr)
 {
 	/* Note: This implementation isn't optimized. */
@@ -517,7 +515,7 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
 		unsigned int level = 0U;
 		unsigned long long addr_pa = 0ULL;

-		(void) get_mem_attributes_internal(ctx, base_va, &old_attr,
+		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
 					    &entry, &addr_pa, &level);

 		/*
@@ -541,7 +539,9 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
 		 * before writing the new descriptor.
 		 */
 		*entry = INVALID_DESC;
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		dccvac((uintptr_t)entry);
+#endif

 		/* Invalidate any cached copy of this mapping in the TLBs. */
 		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
@@ -550,7 +550,9 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
 		/* Write new descriptor */
 		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		dccvac((uintptr_t)entry);
+#endif

 		base_va += PAGE_SIZE;
 	}
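Taken together, the two hunks above give the per-page update in xlat_change_mem_attributes_ctx() a break-before-make sequence along the following lines. This is a condensed illustration assembled from the surrounding code, using a hypothetical helper name; the real function also synchronizes the TLB maintenance and recomputes the attributes, which is omitted here:

/* Per-page break-before-make, with cleans for a non-coherent table walker. */
static void update_live_descriptor(const xlat_ctx_t *ctx, uintptr_t base_va,
				   uint64_t *entry, uint64_t new_desc)
{
	*entry = INVALID_DESC;			/* 1. break: invalidate the descriptor */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	dccvac((uintptr_t)entry);		/*    make the walker see the write */
#endif
	xlat_arch_tlbi_va(base_va, ctx->xlat_regime);	/* 2. drop stale TLB entries */

	*entry = new_desc;			/* 3. make: install the new descriptor */
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	dccvac((uintptr_t)entry);
#endif
}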
services/std_svc/spm/sp_xlat.c
@@ -44,7 +44,7 @@ xlat_ctx_t *spm_get_sp_xlat_context(void)
  * converts an attributes value from the SMC format to the mmap_attr_t format by
  * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
  * The other fields are left as 0 because they are ignored by the function
- * change_mem_attributes().
+ * xlat_change_mem_attributes_ctx().
  */
 static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
 {
@@ -112,12 +112,12 @@ int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
 	spin_lock(&mem_attr_smc_lock);

-	int rc = get_mem_attributes(sp_ctx->xlat_ctx_handle,
-				    base_va, &attributes);
+	int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+					     base_va, &attributes);

 	spin_unlock(&mem_attr_smc_lock);

-	/* Convert error codes of get_mem_attributes() into SPM ones. */
+	/* Convert error codes of xlat_get_mem_attributes_ctx() into SPM. */
 	assert((rc == 0) || (rc == -EINVAL));

 	if (rc == 0) {
@@ -142,13 +142,13 @@ int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
 	spin_lock(&mem_attr_smc_lock);

-	int ret = change_mem_attributes(sp_ctx->xlat_ctx_handle,
-				base_va, size,
-				smc_attr_to_mmap_attr(attributes));
+	int ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+					base_va, size,
+					smc_attr_to_mmap_attr(attributes));

 	spin_unlock(&mem_attr_smc_lock);

-	/* Convert error codes of change_mem_attributes() into SPM ones. */
+	/* Convert error codes of xlat_change_mem_attributes_ctx() into SPM. */
 	assert((ret == 0) || (ret == -EINVAL));

 	return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;