diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
index 9677e2e05daff9721118b459fcb8aba43de51630..cd824973249ca6430d5255c5b322338f655a9c11 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -5,6 +5,7 @@
  */
 
 #include <arch.h>
+#include <arm_arch_svc.h>
 #include <asm_macros.S>
 #include <context.h>
 
@@ -12,6 +13,7 @@
 
 #define EMIT_BPIALL		0xee070fd5
 #define EMIT_SMC		0xe1600070
+#define ESR_EL3_A64_SMC0	0x5e000000
 
 	.macro	enter_workaround _from_vector
 	/*
@@ -303,6 +305,23 @@ vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
 
 	/* Fast path Sync exceptions.  Static predictor will fall through. */
 	tbz	w2, #0, workaround_not_sync
+
+	/*
+	 * Check if SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 *     (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
+	 */
+	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+	cmp	w0, w2
+	mov_imm	w2, ESR_EL3_A64_SMC0
+	ccmp	w3, w2, #0, eq
+	/* Static predictor will predict a fall through */
+	bne	1f
+	eret
+1:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	sync_exception_aarch64
 	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
index f4781484c9c0ed3c669c7854f4061c0a434226ad..b24b620c81db7991f74092a4ef4e3a695e731ca2 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -1,26 +1,60 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <arch.h>
+#include <arm_arch_svc.h>
 #include <asm_macros.S>
 #include <context.h>
 
 	.globl	workaround_mmu_runtime_exceptions
 
+#define ESR_EL3_A64_SMC0	0x5e000000
+
 vector_base workaround_mmu_runtime_exceptions
 
-	.macro	apply_workaround
+	.macro	apply_workaround _is_sync_exception
 	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	mrs	x0, sctlr_el3
+	mrs	x1, sctlr_el3
 	/* Disable MMU */
-	bic	x1, x0, #SCTLR_M_BIT
+	bic	x1, x1, #SCTLR_M_BIT
 	msr	sctlr_el3, x1
 	isb
-	/* Restore MMU config */
-	msr	sctlr_el3, x0
+	/* Enable MMU */
+	orr	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	/*
+	 * Defer ISB to avoid synchronizing twice in case we hit
+	 * the workaround SMC call which will implicitly synchronize
+	 * because of the ERET instruction.
+	 */
+
+	/*
+	 * Ensure SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 *     (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
+	 */
+	.if \_is_sync_exception
+	orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+	cmp	w0, w1
+	mrs	x0, esr_el3
+	mov_imm	w1, ESR_EL3_A64_SMC0
+	ccmp	w0, w1, #0, eq
+	/* Static predictor will predict a fall through */
+	bne	1f
+	eret
+1:
+	.endif
+
+	/*
+	 * Synchronize now to enable the MMU.  This is required
+	 * to ensure the load pair below reads the data stored earlier.
+	 */
 	isb
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	.endm
@@ -70,22 +104,22 @@ vector_entry workaround_mmu_serror_sp_elx
  * ---------------------------------------------------------------------
  */
 vector_entry workaround_mmu_sync_exception_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=1
 	b	sync_exception_aarch64
 	check_vector_size workaround_mmu_sync_exception_aarch64
 
 vector_entry workaround_mmu_irq_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	irq_aarch64
 	check_vector_size workaround_mmu_irq_aarch64
 
 vector_entry workaround_mmu_fiq_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	fiq_aarch64
 	check_vector_size workaround_mmu_fiq_aarch64
 
 vector_entry workaround_mmu_serror_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	serror_aarch64
 	check_vector_size workaround_mmu_serror_aarch64
 
@@ -94,21 +128,21 @@ vector_entry workaround_mmu_serror_aarch64
  * ---------------------------------------------------------------------
  */
 vector_entry workaround_mmu_sync_exception_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=1
 	b	sync_exception_aarch32
 	check_vector_size workaround_mmu_sync_exception_aarch32
 
 vector_entry workaround_mmu_irq_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	irq_aarch32
 	check_vector_size workaround_mmu_irq_aarch32
 
 vector_entry workaround_mmu_fiq_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	fiq_aarch32
 	check_vector_size workaround_mmu_fiq_aarch32
 
 vector_entry workaround_mmu_serror_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
 	b	serror_aarch32
 	check_vector_size workaround_mmu_serror_aarch32
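
Note on the fast-path check added in both files: the CMP/CCMP pair implements the
condition described in the in-line comment with a single conditional branch. A
minimal C sketch of that condition follows; it is illustrative only and not part
of the patch. The numeric value of SMCCC_ARCH_WORKAROUND_1 (0x80008000, per the
SMC Calling Convention) is an assumption here, since the diff only pulls in the
symbol via arm_arch_svc.h; ESR_EL3_A64_SMC0 (0x5e000000) is the ESR_EL3 syndrome
for an SMC #0 taken from AArch64 state (EC=0x17, IL=1, ISS=0).

	/* Illustrative sketch only -- not part of the patch. */
	#include <stdbool.h>
	#include <stdint.h>

	#define SMCCC_ARCH_WORKAROUND_1	0x80008000U	/* assumed SMCCC value */
	#define ESR_EL3_A64_SMC0	0x5e000000U	/* SMC #0 from AArch64 state */

	/*
	 * Equivalent of the CMP/CCMP sequence: only when W0 selects the
	 * workaround fast path is ESR_EL3 compared against SMC#0; otherwise
	 * CCMP forces the flags to NE (#0), so the single BNE falls through
	 * to the normal exception path.
	 */
	static bool workaround_fast_path(uint32_t w0, uint32_t esr_el3)
	{
		return (w0 == SMCCC_ARCH_WORKAROUND_1) &&
		       (esr_el3 == ESR_EL3_A64_SMC0);
	}

When the condition holds, the vector simply ERETs: the workaround sequence already
performed on entry is the entire cost of the SMCCC_ARCH_WORKAROUND_1 call, and the
general SMC dispatch path is skipped.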