diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index fd61fc40b1b6227da9f664f363f0c6fcb8250f50..022accee541bc046ff6dec579c1e8837355db95b 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -296,14 +296,15 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
  * translation tables are not modified by any other code while this function is
  * executing.
  */
-int change_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
-                          uint32_t attr);
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+                                   size_t size, uint32_t attr);
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr);
 
 /*
  * Query the memory attributes of a memory page in a set of translation tables.
  *
  * Return 0 on success, a negative error code on error.
- * On success, the attributes are stored into *attributes.
+ * On success, the attributes are stored into *attr.
  *
  * ctx
  *   Translation context to work on.
@@ -311,11 +312,12 @@ int change_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
  *   Virtual address of the page to get the attributes of.
  *   There are no alignment restrictions on this address. The attributes of the
  *   memory page it lies within are returned.
- * attributes
+ * attr
  *   Output parameter where to store the attributes of the targeted memory page.
  */
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
-                       uint32_t *attributes);
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+                                uint32_t *attr);
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);
 
 #endif /*__ASSEMBLY__*/
 
 #endif /* XLAT_TABLES_V2_H */
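Note: besides the rename, each *_ctx function now has a convenience wrapper that
operates on the active translation context (implemented in xlat_tables_context.c
further down in this patch). A minimal caller-side sketch of the new API;
make_page_ro() is an illustrative name, while the MT_* flags and PAGE_SIZE come
from the existing TF-A headers:

    #include <xlat_tables_v2.h>

    /* Illustrative helper: make one page of the active translation
     * context read-only and non-executable. */
    static int make_page_ro(uintptr_t page_base)
    {
            uint32_t attr;
            int rc = xlat_get_mem_attributes(page_base, &attr);

            if (rc != 0)
                    return rc; /* The address is not mapped. */

            /* Keep the memory type, drop write and execute permission. */
            attr &= ~MT_RW;
            attr |= MT_RO | MT_EXECUTE_NEVER;

            return xlat_change_mem_attributes(page_base, PAGE_SIZE, attr);
    }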
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index a29a29c4937bb9e544df156c212066641d5778d4..63d7e7088743cf7bd01b78e521f39b3b7eec064c 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -91,28 +91,6 @@ func psci_do_pwrup_cache_maintenance
 	stcopr	r0, SCTLR
 	isb
 
-#if PLAT_XLAT_TABLES_DYNAMIC
-	/* ---------------------------------------------
-	 * During warm boot the MMU is enabled with data
-	 * cache disabled, then the interconnect is set
-	 * up and finally the data cache is enabled.
-	 *
-	 * During this period, if another CPU modifies
-	 * the translation tables, the MMU table walker
-	 * may read the old entries. This is only a
-	 * problem for dynamic regions, the warm boot
-	 * code isn't affected because it is static.
-	 *
-	 * Invalidate all TLB entries loaded while the
-	 * CPU wasn't coherent with the rest of the
-	 * system.
-	 * ---------------------------------------------
-	 */
-	stcopr	r0, TLBIALL
-	dsb	ish
-	isb
-#endif
-
 	pop	{r12, pc}
 endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index d37ca764a5b97ea42a5d16e047fb1af04ba19b73..06d6636ed785028db122ec86e7e59aa4a0ccb8bc 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -115,28 +115,6 @@ func psci_do_pwrup_cache_maintenance
 	msr	sctlr_el3, x0
 	isb
 
-#if PLAT_XLAT_TABLES_DYNAMIC
-	/* ---------------------------------------------
-	 * During warm boot the MMU is enabled with data
-	 * cache disabled, then the interconnect is set
-	 * up and finally the data cache is enabled.
-	 *
-	 * During this period, if another CPU modifies
-	 * the translation tables, the MMU table walker
-	 * may read the old entries. This is only a
-	 * problem for dynamic regions, the warm boot
-	 * code isn't affected because it is static.
-	 *
-	 * Invalidate all TLB entries loaded while the
-	 * CPU wasn't coherent with the rest of the
-	 * system.
-	 * ---------------------------------------------
-	 */
-	tlbi	alle3
-	dsb	ish
-	isb
-#endif
-
 	ldp	x29, x30, [sp], #16
 	ret
endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index c09fb596446f73953f7bd50cf5e7fa55950cdaa2..24828409cee23bbacd3d0ae2a223ad2d4cfb19b9 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -48,6 +48,11 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
 	return (read_sctlr() & SCTLR_M_BIT) != 0;
 }
 
+bool is_dcache_enabled(void)
+{
+	return (read_sctlr() & SCTLR_C_BIT) != 0;
+}
+
 uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
 {
 	return UPPER_ATTRS(XN);
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 4e4292ff5bdc7d9f940e6ebfec6ceda971bc0de8..cf8070ad8e731833ed39c7db08283d287c085e4c 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -112,6 +112,17 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 	}
 }
 
+bool is_dcache_enabled(void)
+{
+	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+	if (el == 1U) {
+		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+	} else {
+		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+	}
+}
+
 uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
 {
 	if (xlat_regime == EL1_EL0_REGIME) {
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index d7b2ebf8dc9b284d89554592183455afab0c6f4e..143f08ab3e239e78b45dff88c4102945f172e634 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -90,6 +90,16 @@ void init_xlat_tables(void)
 	init_xlat_tables_ctx(&tf_xlat_ctx);
 }
 
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
+
 /*
  * If dynamic allocation of new regions is disabled then by the time we call the
  * function enabling the MMU, we'll have registered all the memory regions to
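Note: the new is_dcache_enabled() query is what makes the TLB-invalidation
removal above safe. A warm-booting CPU walks the tables with its MMU on but its
data cache still off, so it fetches descriptors straight from memory; the fix is
to make every table update visible at the Point of Coherency when it is written,
instead of invalidating TLBs after the fact. A sketch of the resulting update
pattern, assuming the existing TF-A helpers clean_dcache_range() and dsbish();
write_xlat_descriptor() is a hypothetical name:

    /* Hypothetical illustration of the visibility rule applied by this
     * patch: if the updating CPU has its D-cache on, the new descriptor
     * may sit dirty in its cache, so clean it to the Point of
     * Coherency; if the D-cache is off, the store already reached
     * memory and the clean is unnecessary. */
    static void write_xlat_descriptor(uint64_t *entry, uint64_t desc)
    {
            *entry = desc;
            if (is_dcache_enabled())
                    clean_dcache_range((uintptr_t)entry, sizeof(*entry));
            dsbish(); /* Complete the update before TLB maintenance. */
    }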
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 54e58341ac034cd9b8ddd1c49888ba802ce40ac3..56b9514c36116b9ea5d1d9a4f5958ce2c3e018f0 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -18,6 +18,13 @@
 
 #include "xlat_tables_private.h"
 
+/* Helper function that cleans the data cache only if it is enabled. */
+static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+	if (is_dcache_enabled())
+		clean_dcache_range(addr, size);
+}
+
 #if PLAT_XLAT_TABLES_DYNAMIC
 
 /*
@@ -329,7 +336,10 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			xlat_tables_unmap_region(ctx, mm, table_idx_va,
 						 subtable, XLAT_TABLE_ENTRIES,
 						 level + 1U);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
 			/*
 			 * If the subtable is now empty, remove its reference.
 			 */
@@ -563,6 +573,10 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
 					       subtable, XLAT_TABLE_ENTRIES,
 					       level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
 			if (end_va != (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
 				return end_va;
 
@@ -575,6 +589,10 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
 					       subtable, XLAT_TABLE_ENTRIES,
 					       level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
 			if (end_va != (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
 				return end_va;
 
@@ -859,7 +877,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 		end_va = xlat_tables_map_region(ctx, mm_cursor, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+			ctx->base_table_entries * sizeof(uint64_t));
+#endif
 		/* Failed to map, remove mmap entry, unmap and return error. */
 		if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
 			(void)memmove(mm_cursor, mm_cursor + 1U,
@@ -885,7 +906,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 			xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+				ctx->base_table_entries * sizeof(uint64_t));
+#endif
 			return -ENOMEM;
 		}
 
@@ -951,6 +975,10 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
 		xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
 					 ctx->base_table_entries,
 					 ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+			ctx->base_table_entries * sizeof(uint64_t));
+#endif
 		xlat_arch_tlbi_va_sync();
 	}
 
@@ -1012,7 +1040,10 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 	uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
 				ctx->base_table, ctx->base_table_entries,
 				ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+	xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+		ctx->base_table_entries * sizeof(uint64_t));
+#endif
 	if (end_va != (mm->base_va + mm->size - 1U)) {
 		ERROR("Not enough memory to map region:\n"
 		      " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
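Note: the same guarded clean now follows every call into xlat_tables_map_region()
and xlat_tables_unmap_region() in this file. A hypothetical consolidation (not
part of the patch) that would keep each call site to a single line:

    /* Hypothetical wrapper: clean a whole translation table after
     * modifying it, but only on builds where another CPU may walk it
     * with its data cache disabled. */
    static void xlat_tables_clean(const uint64_t *table, unsigned int entries)
    {
    #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
            xlat_clean_dcache_range((uintptr_t)table,
                                    entries * sizeof(uint64_t));
    #else
            /* Coherent table walks: no clean needed. */
            (void)table;
            (void)entries;
    #endif
    }

Call sites would then read xlat_tables_clean(subtable, XLAT_TABLE_ENTRIES) or
xlat_tables_clean(ctx->base_table, ctx->base_table_entries).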
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 313bd8d68d6936389a66a719a188c7aa6ad4d453..528996a29aa6028ed738e3443161b7b445478923 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -97,4 +97,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
  */
 bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
 
+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
+
 #endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 0cbd45e400bfc3df361076bc3535445d269ffe9a..8cad3483ffd06e73020f8c502a51ff9f0816e701 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -314,8 +314,8 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
 }
 
 
-static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
-		uint32_t *attributes, uint64_t **table_entry,
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
 		unsigned long long *addr_pa, unsigned int *table_level)
 {
 	uint64_t *entry;
@@ -407,18 +407,16 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
 }
 
 
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
-		       uint32_t *attributes)
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				uint32_t *attr)
 {
-	return get_mem_attributes_internal(ctx, base_va, attributes,
-					   NULL, NULL, NULL);
+	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+						NULL, NULL, NULL);
 }
 
 
-int change_mem_attributes(const xlat_ctx_t *ctx,
-			  uintptr_t base_va,
-			  size_t size,
-			  uint32_t attr)
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+				   size_t size, uint32_t attr)
 {
 	/* Note: This implementation isn't optimized. */
 
@@ -517,7 +515,7 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
 		unsigned int level = 0U;
 		unsigned long long addr_pa = 0ULL;
 
-		(void) get_mem_attributes_internal(ctx, base_va, &old_attr,
+		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
 					    &entry, &addr_pa, &level);
 
 		/*
@@ -541,7 +539,9 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
 		 * before writing the new descriptor.
 		 */
 		*entry = INVALID_DESC;
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		dccvac((uintptr_t)entry);
+#endif
 		/* Invalidate any cached copy of this mapping in the TLBs. */
 		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
 
@@ -550,7 +550,9 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
 
 		/* Write new descriptor */
 		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+		dccvac((uintptr_t)entry);
+#endif
 		base_va += PAGE_SIZE;
 	}
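Note: the two dccvac() calls (DC CVAC, clean by VA to the Point of Coherency)
slot into the existing break-before-make sequence of
xlat_change_mem_attributes_ctx(). Condensed, the loop body now performs the
following steps; all names are from the patched file, and the TLB sync sits in
the unmodified context between the two hunks above:

    /* Condensed restatement of the sequence above, not new code. */
    *entry = INVALID_DESC;                   /* Break: unmap the page. */
    dccvac((uintptr_t)entry);                /* Clean it to the PoC. */
    xlat_arch_tlbi_va(base_va, ctx->xlat_regime); /* Drop cached copies. */
    xlat_arch_tlbi_va_sync();                /* Wait for the TLBI. */
    *entry = xlat_desc(ctx, new_attr, addr_pa, level); /* Make: remap. */
    dccvac((uintptr_t)entry);                /* Clean the new descriptor. */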
diff --git a/services/std_svc/spm/sp_xlat.c b/services/std_svc/spm/sp_xlat.c
index 2aa2fa135763a51a4178a7a098e2c3e8a3fe7555..3527138600c83f867f489311c3e29abf7993d3b9 100644
--- a/services/std_svc/spm/sp_xlat.c
+++ b/services/std_svc/spm/sp_xlat.c
@@ -44,7 +44,7 @@ xlat_ctx_t *spm_get_sp_xlat_context(void)
  * converts an attributes value from the SMC format to the mmap_attr_t format by
  * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
  * The other fields are left as 0 because they are ignored by the function
- * change_mem_attributes().
+ * xlat_change_mem_attributes_ctx().
  */
 static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
 {
@@ -112,12 +112,12 @@ int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
 
 	spin_lock(&mem_attr_smc_lock);
 
-	int rc = get_mem_attributes(sp_ctx->xlat_ctx_handle,
+	int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
 				     base_va, &attributes);
 
 	spin_unlock(&mem_attr_smc_lock);
 
-	/* Convert error codes of get_mem_attributes() into SPM ones. */
+	/* Convert error codes of xlat_get_mem_attributes_ctx() into SPM. */
 	assert((rc == 0) || (rc == -EINVAL));
 
 	if (rc == 0) {
@@ -142,13 +142,13 @@ int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
 
 	spin_lock(&mem_attr_smc_lock);
 
-	int ret = change_mem_attributes(sp_ctx->xlat_ctx_handle,
+	int ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
 			base_va, size, smc_attr_to_mmap_attr(attributes));
 
 	spin_unlock(&mem_attr_smc_lock);
 
-	/* Convert error codes of change_mem_attributes() into SPM ones. */
+	/* Convert error codes of xlat_change_mem_attributes_ctx() into SPM. */
 	assert((ret == 0) || (ret == -EINVAL));
 
 	return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
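Note: after the rename, both SMC handlers end with the same assert-and-convert
pair. A hypothetical helper (not in the patch) that would keep that conversion
in one place:

    /* Hypothetical: map the xlat library's return values onto SPM SMC
     * return codes. These handlers only ever see 0 or -EINVAL here. */
    static int32_t spm_xlat_ret_to_smc(int rc)
    {
            assert((rc == 0) || (rc == -EINVAL));
            return (rc == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
    }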