Unverified Commit 9bc94a6d authored by davidcunado-arm, committed by GitHub

Merge pull request #1240 from dp-arm/dp/smccc

Implement support for SMCCC v1.1 and optimize security mitigations for CVE-2017-5715 on AArch64
parents 334e1ceb 1d6d47a8
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -153,7 +153,14 @@ interrupt_exit_\label:
	.endm

-	.macro save_x18_to_x29_sp_el0
+	.macro save_x4_to_x29_sp_el0
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
@@ -297,34 +304,16 @@ smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

-	/*
-	 * Since we're are coming from aarch32, x8-x18 need to be saved as per
-	 * SMC32 calling convention. If a lower EL in aarch64 is making an
-	 * SMC32 call then it must have saved x8-x17 already therein.
-	 */
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-
-	/* x4-x7, x18, sp_el0 are saved below */
-
smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
-	 * contain flags we need to pass to the handler Hence save x5-x7.
+	 * contain flags we need to pass to the handler.
	 *
-	 * Note: x4 only needs to be preserved for AArch32 callers but we do it
-	 * for AArch64 callers as well for convenience
+	 * Save x4-x29 and sp_el0. Refer to SMCCC v1.1.
	 */
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-
-	/* Save rest of the gpregs and sp_el0*/
-	save_x18_to_x29_sp_el0
+	save_x4_to_x29_sp_el0

	mov	x5, xzr
	mov	x6, sp
...
#
-# Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -23,6 +23,7 @@ BL31_SOURCES += bl31/bl31_main.c \
				bl31/bl31_context_mgmt.c		\
				common/runtime_svc.c			\
				plat/common/aarch64/platform_mp_stack.S	\
+				services/arm_arch_svc/arm_arch_svc_setup.c \
				services/std_svc/std_svc_setup.c	\
				${PSCI_LIB_SOURCES}			\
				${SPM_SOURCES}				\
...
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -46,26 +46,12 @@
#define CTX_GPREG_SP_EL0	U(0xf8)
#define CTX_GPREGS_END		U(0x100)

-#if WORKAROUND_CVE_2017_5715
-#define CTX_CVE_2017_5715_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
-#define CTX_CVE_2017_5715_QUAD0		U(0x0)
-#define CTX_CVE_2017_5715_QUAD1		U(0x8)
-#define CTX_CVE_2017_5715_QUAD2		U(0x10)
-#define CTX_CVE_2017_5715_QUAD3		U(0x18)
-#define CTX_CVE_2017_5715_QUAD4		U(0x20)
-#define CTX_CVE_2017_5715_QUAD5		U(0x28)
-#define CTX_CVE_2017_5715_END		U(0x30)
-#else
-#define CTX_CVE_2017_5715_OFFSET	CTX_GPREGS_OFFSET
-#define CTX_CVE_2017_5715_END		CTX_GPREGS_END
-#endif
-
/*******************************************************************************
 * Constants that allow assembler code to access members of and the 'el3_state'
 * structure at their correct offsets. Note that some of the registers are only
 * 32-bits wide but are stored as 64-bit values for convenience
 ******************************************************************************/
-#define CTX_EL3STATE_OFFSET	(CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
+#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
#define CTX_SCR_EL3		U(0x0)
#define CTX_RUNTIME_SP		U(0x8)
#define CTX_SPSR_EL3		U(0x10)
@@ -200,9 +186,6 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
-#if WORKAROUND_CVE_2017_5715
-#define CTX_CVE_2017_5715_ALL	(CTX_CVE_2017_5715_END >> DWORD_SHIFT)
-#endif
#define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
#define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
@@ -218,10 +201,6 @@
 */
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);

-#if WORKAROUND_CVE_2017_5715
-DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
-#endif
-
/*
 * AArch64 EL1 system register context structure for preserving the
 * architectural state during switches from one security state to
@@ -263,9 +242,6 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
 */
typedef struct cpu_context {
	gp_regs_t gpregs_ctx;
-#if WORKAROUND_CVE_2017_5715
-	cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
-#endif
	el3_state_t el3state_ctx;
	el1_sys_regs_t sysregs_ctx;
#if CTX_INCLUDE_FPREGS
...
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
@@ -67,6 +67,11 @@
#include <cassert.h>
#include <stdint.h>

+#define SMCCC_MAJOR_VERSION U(1)
+#define SMCCC_MINOR_VERSION U(1)
+#define MAKE_SMCCC_VERSION(_major, _minor) (((_major) << 16) | (_minor))
+
/* Various flags passed to SMC handlers */
#define SMC_FROM_SECURE		(U(0) << 0)
#define SMC_FROM_NON_SECURE	(U(1) << 0)
@@ -78,6 +83,10 @@
#define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
					   FUNCID_OEN_MASK) == OEN_STD_START)

+/* The macro below is used to identify an Arm Architectural Service SMC call */
+#define is_arm_arch_svc_call(_fid)	((((_fid) >> FUNCID_OEN_SHIFT) & \
+					   FUNCID_OEN_MASK) == OEN_ARM_START)
+
/* The macro below is used to identify a valid Fast SMC call */
#define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & U(0xff))) && \
					   (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
...
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __ARM_ARCH_SVC_H__
#define __ARM_ARCH_SVC_H__
#define SMCCC_VERSION U(0x80000000)
#define SMCCC_ARCH_FEATURES U(0x80000001)
#define SMCCC_ARCH_WORKAROUND_1 U(0x80008000)
#endif /* __ARM_ARCH_SVC_H__ */
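
The three function IDs above follow the SMC Calling Convention encoding: bit 31 marks a Fast call, bit 30 selects SMC32, and bits [29:24] carry the Owning Entity Number, which is 0 (Arm Architecture Service) for all of them. Below is a small standalone C sketch, not taken from any TF-A header, that restates the new constants and checks the encodings this patch relies on:

/* Standalone sketch: the macros are re-stated locally so the checks are
 * self-contained; values mirror the patch and the SMCCC specification. */
#define SMCCC_MAJOR_VERSION     1u
#define SMCCC_MINOR_VERSION     1u
#define MAKE_SMCCC_VERSION(maj, min)  (((maj) << 16) | (min))

#define FUNCID_OEN_SHIFT        24u
#define FUNCID_OEN_MASK         0x3fu
#define OEN_ARM_START           0u      /* Arm Architecture Service OEN */

#define SMCCC_VERSION           0x80000000u
#define SMCCC_ARCH_FEATURES     0x80000001u
#define SMCCC_ARCH_WORKAROUND_1 0x80008000u

/* SMCCC v1.1 is reported as 0x10001: major in bits [30:16], minor in [15:0]. */
_Static_assert(MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) == 0x10001u,
               "SMCCC_VERSION must report v1.1");

/* All three function IDs carry OEN 0, so the new is_arm_arch_svc_call()
 * check in smcc.h routes them to the Arm Architectural Service handler. */
_Static_assert(((SMCCC_VERSION >> FUNCID_OEN_SHIFT) & FUNCID_OEN_MASK) == OEN_ARM_START,
               "SMCCC_VERSION is owned by the Arm Architecture Service");
_Static_assert(((SMCCC_ARCH_WORKAROUND_1 >> FUNCID_OEN_SHIFT) & FUNCID_OEN_MASK) == OEN_ARM_START,
               "SMCCC_ARCH_WORKAROUND_1 is owned by the Arm Architecture Service");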
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
+#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <context.h>

	.globl	workaround_bpiall_vbar0_runtime_exceptions

#define EMIT_BPIALL		0xee070fd5
-#define EMIT_MOV_R0_IMM(v)	0xe3a0000##v
#define EMIT_SMC		0xe1600070
+#define ESR_EL3_A64_SMC0	0x5e000000

-	.macro	enter_workaround _stub_name
+	.macro	enter_workaround _from_vector
+	/*
+	 * Save register state to enable a call to AArch32 S-EL1 and return
+	 * Identify the original calling vector in w2 (==_from_vector)
+	 * Use w3-w6 for additional register state preservation while in S-EL1
+	 */
+
	/* Save GP regs */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
@@ -32,47 +39,50 @@
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

-	adr	x4, \_stub_name
+	/* Identify the original exception vector */
+	mov	w2, \_from_vector
+
+	/* Preserve 32-bit system registers in GP registers through the workaround */
+	mrs	x3, esr_el3
+	mrs	x4, spsr_el3
+	mrs	x5, scr_el3
+	mrs	x6, sctlr_el1

	/*
-	 * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
-	 * all interrupts masked in preparation to running the workaround
-	 * stub in S-EL1. VBAR_EL3 points to the vector table that
-	 * will handle the SMC back from the workaround stub.
+	 * Preserve LR and ELR_EL3 registers in the GP regs context.
+	 * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
+	 * through the workaround. This is OK because at this point the
+	 * current state for this context's SP_EL0 is in the live system
+	 * register, which is unmodified by the workaround.
	 */
-	ldp	x0, x1, [x4, #0]
+	mrs	x7, elr_el3
+	stp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/*
-	 * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
-	 * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+	 * Load system registers for entry to S-EL1.
	 */
-	ldp	x2, x3, [x4, #16]

-	mrs	x4, scr_el3
-	mrs	x5, spsr_el3
-	mrs	x6, elr_el3
-	mrs	x7, sctlr_el1
-	mrs	x8, esr_el3
+	/* Mask all interrupts and set AArch32 Supervisor mode */
+	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)

-	/* Preserve system registers in the workaround context */
-	stp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
-	stp	x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
-	stp	x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+	/* Switch EL3 exception vectors while the workaround is executing. */
+	adr	x9, workaround_bpiall_vbar1_runtime_exceptions
+
+	/* Setup SCTLR_EL1 with MMU off and I$ on */
+	ldr	x10, stub_sel1_sctlr
+
+	/* Land at the S-EL1 workaround stub */
+	adr	x11, aarch32_stub

	/*
	 * Setting SCR_EL3 to all zeroes means that the NS, RW
	 * and SMD bits are configured as expected.
	 */
	msr	scr_el3, xzr
-
-	/*
-	 * Reload system registers with the crafted values
-	 * in preparation for entry in S-EL1.
-	 */
-	msr	spsr_el3, x0
-	msr	vbar_el3, x1
-	msr	sctlr_el1, x2
-	msr	elr_el3, x3
+	msr	spsr_el3, x8
+	msr	vbar_el3, x9
+	msr	sctlr_el1, x10
+	msr	elr_el3, x11

	eret
	.endm
@@ -91,76 +101,31 @@ vector_base workaround_bpiall_vbar0_runtime_exceptions
 */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
	b	sync_exception_sp_el0
+	nop	/* to force 8 byte alignment for the following stub */
+
	/*
	 * Since each vector table entry is 128 bytes, we can store the
	 * stub context in the unused space to minimize memory footprint.
	 */
-aarch32_stub_smc:
+stub_sel1_sctlr:
+	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+aarch32_stub:
	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(1)
	.word	EMIT_SMC
-aarch32_stub_ctx_smc:
-	/* Mask all interrupts and set AArch32 Supervisor mode */
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-		 MODE32_svc << MODE32_SHIFT)
-	/*
-	 * VBAR_EL3 points to vbar1 which is the vector table
-	 * used while the workaround is executing.
-	 */
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	/* Setup SCTLR_EL1 with MMU off and I$ on */
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	/* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
-	.quad	aarch32_stub_smc
+
	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0

vector_entry workaround_bpiall_vbar0_irq_sp_el0
	b	irq_sp_el0
-aarch32_stub_irq:
-	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(2)
-	.word	EMIT_SMC
-aarch32_stub_ctx_irq:
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-		 MODE32_svc << MODE32_SHIFT)
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	.quad	aarch32_stub_irq
	check_vector_size workaround_bpiall_vbar0_irq_sp_el0

vector_entry workaround_bpiall_vbar0_fiq_sp_el0
	b	fiq_sp_el0
-aarch32_stub_fiq:
-	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(4)
-	.word	EMIT_SMC
-aarch32_stub_ctx_fiq:
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-		 MODE32_svc << MODE32_SHIFT)
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	.quad	aarch32_stub_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0

vector_entry workaround_bpiall_vbar0_serror_sp_el0
	b	serror_sp_el0
-aarch32_stub_serror:
-	.word	EMIT_BPIALL
-	.word	EMIT_MOV_R0_IMM(8)
-	.word	EMIT_SMC
-aarch32_stub_ctx_serror:
-	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
-		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
-		 MODE32_svc << MODE32_SHIFT)
-	.quad	workaround_bpiall_vbar1_runtime_exceptions
-	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-	.quad	aarch32_stub_serror
	check_vector_size workaround_bpiall_vbar0_serror_sp_el0

/* ---------------------------------------------------------------------
@@ -188,19 +153,19 @@ vector_entry workaround_bpiall_vbar0_serror_sp_elx
 * ---------------------------------------------------------------------
 */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
-	enter_workaround aarch32_stub_ctx_smc
+	enter_workaround 1
	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64

vector_entry workaround_bpiall_vbar0_irq_aarch64
-	enter_workaround aarch32_stub_ctx_irq
+	enter_workaround 2
	check_vector_size workaround_bpiall_vbar0_irq_aarch64

vector_entry workaround_bpiall_vbar0_fiq_aarch64
-	enter_workaround aarch32_stub_ctx_fiq
+	enter_workaround 4
	check_vector_size workaround_bpiall_vbar0_fiq_aarch64

vector_entry workaround_bpiall_vbar0_serror_aarch64
-	enter_workaround aarch32_stub_ctx_serror
+	enter_workaround 8
	check_vector_size workaround_bpiall_vbar0_serror_aarch64

/* ---------------------------------------------------------------------
@@ -208,19 +173,19 @@ vector_entry workaround_bpiall_vbar0_serror_aarch64
 * ---------------------------------------------------------------------
 */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
-	enter_workaround aarch32_stub_ctx_smc
+	enter_workaround 1
	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32

vector_entry workaround_bpiall_vbar0_irq_aarch32
-	enter_workaround aarch32_stub_ctx_irq
+	enter_workaround 2
	check_vector_size workaround_bpiall_vbar0_irq_aarch32

vector_entry workaround_bpiall_vbar0_fiq_aarch32
-	enter_workaround aarch32_stub_ctx_fiq
+	enter_workaround 4
	check_vector_size workaround_bpiall_vbar0_fiq_aarch32

vector_entry workaround_bpiall_vbar0_serror_aarch32
-	enter_workaround aarch32_stub_ctx_serror
+	enter_workaround 8
	check_vector_size workaround_bpiall_vbar0_serror_aarch32

/* ---------------------------------------------------------------------
@@ -297,31 +262,33 @@ vector_entry workaround_bpiall_vbar1_serror_aarch64
 * ---------------------------------------------------------------------
 */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
-	/* Restore register state from the workaround context */
-	ldp	x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
-	ldp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
-	ldp	x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+	/*
+	 * w2 indicates which SEL1 stub was run and thus which original vector was used
+	 * w3-w6 contain saved system register state (esr_el3 in w3)
+	 * Restore LR and ELR_EL3 register state from the GP regs context
+	 */
+	ldp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Apply the restored system register state */
-	msr	scr_el3, x2
-	msr	spsr_el3, x3
-	msr	elr_el3, x4
-	msr	sctlr_el1, x5
-	msr	esr_el3, x6
+	msr	esr_el3, x3
+	msr	spsr_el3, x4
+	msr	scr_el3, x5
+	msr	sctlr_el1, x6
+	msr	elr_el3, x7

	/*
	 * Workaround is complete, so swap VBAR_EL3 to point
	 * to workaround entry table in preparation for subsequent
	 * Sync/IRQ/FIQ/SError exceptions.
	 */
-	adr	x2, workaround_bpiall_vbar0_runtime_exceptions
-	msr	vbar_el3, x2
+	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	msr	vbar_el3, x0

	/*
-	 * Restore all GP regs except x0 and x1. The value in x0
+	 * Restore all GP regs except x2 and x3 (esr). The value in x2
	 * indicates the type of the original exception.
	 */
-	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
@@ -336,37 +303,55 @@ vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

+	/* Fast path Sync exceptions. Static predictor will fall through. */
+	tbz	w2, #0, workaround_not_sync
+
	/*
-	 * Each of these handlers will first restore x0 and x1 from
-	 * the context and the branch to the common implementation for
-	 * each of the exception types.
+	 * Check if SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
	 */
-	tbnz	x0, #1, workaround_bpiall_vbar1_irq
-	tbnz	x0, #2, workaround_bpiall_vbar1_fiq
-	tbnz	x0, #3, workaround_bpiall_vbar1_serror
-
-	/* Fallthrough case for Sync exception */
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+	cmp	w0, w2
+	mov_imm	w2, ESR_EL3_A64_SMC0
+	ccmp	w3, w2, #0, eq
+	/* Static predictor will predict a fall through */
+	bne	1f
+	eret
+1:
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	sync_exception_aarch64
	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32

vector_entry workaround_bpiall_vbar1_irq_aarch32
	b	report_unhandled_interrupt
-workaround_bpiall_vbar1_irq:
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+
+	/*
+	 * Post-workaround fan-out for non-sync exceptions
+	 */
+workaround_not_sync:
+	tbnz	w2, #3, workaround_bpiall_vbar1_serror
+	tbnz	w2, #2, workaround_bpiall_vbar1_fiq
+	/* IRQ */
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	irq_aarch64
+
+workaround_bpiall_vbar1_fiq:
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	fiq_aarch64
+
+workaround_bpiall_vbar1_serror:
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	serror_aarch64
	check_vector_size workaround_bpiall_vbar1_irq_aarch32

vector_entry workaround_bpiall_vbar1_fiq_aarch32
	b	report_unhandled_interrupt
-workaround_bpiall_vbar1_fiq:
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	b	fiq_aarch64
	check_vector_size workaround_bpiall_vbar1_fiq_aarch32

vector_entry workaround_bpiall_vbar1_serror_aarch32
	b	report_unhandled_exception
-workaround_bpiall_vbar1_serror:
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	b	serror_aarch64
	check_vector_size workaround_bpiall_vbar1_serror_aarch32
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
+#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <context.h>

	.globl	workaround_mmu_runtime_exceptions

+#define ESR_EL3_A64_SMC0	0x5e000000
+
vector_base workaround_mmu_runtime_exceptions

-	.macro	apply_workaround
+	.macro	apply_workaround _is_sync_exception
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	mrs	x0, sctlr_el3
+	mrs	x1, sctlr_el3
	/* Disable MMU */
-	bic	x1, x0, #SCTLR_M_BIT
+	bic	x1, x1, #SCTLR_M_BIT
	msr	sctlr_el3, x1
	isb
-	/* Restore MMU config */
-	msr	sctlr_el3, x0
+	/* Enable MMU */
+	orr	x1, x1, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	/*
+	 * Defer ISB to avoid synchronizing twice in case we hit
+	 * the workaround SMC call which will implicitly synchronize
+	 * because of the ERET instruction.
+	 */
+
+	/*
+	 * Ensure SMC is coming from A64 state on #0
+	 * with W0 = SMCCC_ARCH_WORKAROUND_1
+	 *
+	 * This sequence evaluates as:
+	 * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing use of a single branch operation
+	 */
+	.if \_is_sync_exception
+	orr	w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+	cmp	w0, w1
+	mrs	x0, esr_el3
+	mov_imm	w1, ESR_EL3_A64_SMC0
+	ccmp	w0, w1, #0, eq
+	/* Static predictor will predict a fall through */
+	bne	1f
+	eret
+1:
+	.endif
+
+	/*
+	 * Synchronize now to enable the MMU. This is required
+	 * to ensure the load pair below reads the data stored earlier.
+	 */
	isb
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm
@@ -70,22 +104,22 @@ vector_entry workaround_mmu_serror_sp_elx
 * ---------------------------------------------------------------------
 */
vector_entry workaround_mmu_sync_exception_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=1
	b	sync_exception_aarch64
	check_vector_size workaround_mmu_sync_exception_aarch64

vector_entry workaround_mmu_irq_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
	b	irq_aarch64
	check_vector_size workaround_mmu_irq_aarch64

vector_entry workaround_mmu_fiq_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
	b	fiq_aarch64
	check_vector_size workaround_mmu_fiq_aarch64

vector_entry workaround_mmu_serror_aarch64
-	apply_workaround
+	apply_workaround _is_sync_exception=0
	b	serror_aarch64
	check_vector_size workaround_mmu_serror_aarch64
@@ -94,21 +128,21 @@ vector_entry workaround_mmu_serror_aarch64
 * ---------------------------------------------------------------------
 */
vector_entry workaround_mmu_sync_exception_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=1
	b	sync_exception_aarch32
	check_vector_size workaround_mmu_sync_exception_aarch32

vector_entry workaround_mmu_irq_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
	b	irq_aarch32
	check_vector_size workaround_mmu_irq_aarch32

vector_entry workaround_mmu_fiq_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
	b	fiq_aarch32
	check_vector_size workaround_mmu_fiq_aarch32

vector_entry workaround_mmu_serror_aarch32
-	apply_workaround
+	apply_workaround _is_sync_exception=0
	b	serror_aarch32
	check_vector_size workaround_mmu_serror_aarch32
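
The cmp/ccmp sequence used by both workaround_cve_2017_5715_*.S variants folds two comparisons into a single conditional branch. A C-equivalent sketch of that fast-path predicate (standalone; the constants are re-stated here rather than pulled from TF-A headers):

/* C equivalent of the cmp/ccmp fast path above (sketch, not TF-A code).
 * fid is the value in W0, esr the trapping syndrome in ESR_EL3.
 * ESR_EL3_A64_SMC0 (0x5e000000) means "SMC from AArch64 with immediate #0". */
#include <stdbool.h>
#include <stdint.h>

#define SMCCC_ARCH_WORKAROUND_1 0x80008000u
#define ESR_EL3_A64_SMC0        0x5e000000u

static bool is_workaround_1_fast_path(uint32_t fid, uint32_t esr)
{
	/* (W0 == SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3 == SMC#0) : not-equal
	 * The assembly folds this into cmp + ccmp, so a single bne decides
	 * between the early eret (the workaround has already run on EL3
	 * entry) and the normal SMC handling path. */
	return (fid == SMCCC_ARCH_WORKAROUND_1) && (esr == ESR_EL3_A64_SMC0);
}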
/*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
+#include <arm_arch_svc.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
@@ -322,6 +323,9 @@ int psci_features(unsigned int psci_fid)
{
	unsigned int local_caps = psci_caps;

+	if (psci_fid == SMCCC_VERSION)
+		return PSCI_E_SUCCESS;
+
	/* Check if it is a 64 bit function */
	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
		local_caps &= PSCI_CAP_64BIT_MASK;
...
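
Returning success from PSCI_FEATURES for SMCCC_VERSION is what lets a normal-world caller probe for SMCCC v1.1 safely before using the new Arch calls. A sketch of that discovery sequence from a hypothetical bare-metal AArch64 caller follows; the smc1 helper is illustrative only, not a TF-A or kernel API, and its wide clobber list accounts for v1.0 implementations that may clobber x4-x17:

/* Sketch of SMCCC v1.1 discovery as seen from the non-secure world.
 * Assumptions: bare-metal AArch64, EL3 firmware reachable via SMC #0. */
#include <stdbool.h>
#include <stdint.h>

#define PSCI_FEATURES           0x8400000au
#define PSCI_E_NOT_SUPPORTED    (-1)
#define SMCCC_VERSION           0x80000000u
#define SMCCC_ARCH_FEATURES     0x80000001u
#define SMCCC_ARCH_WORKAROUND_1 0x80008000u

static int64_t smc1(uint32_t fid, uint64_t arg0)
{
	register uint64_t x0 __asm__("x0") = fid;
	register uint64_t x1 __asm__("x1") = arg0;

	/* SMCCC v1.0 callees may clobber x4-x17, so list them all. */
	__asm__ volatile("smc #0"
			 : "+r"(x0), "+r"(x1)
			 :
			 : "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
			   "x10", "x11", "x12", "x13", "x14", "x15", "x16",
			   "x17", "memory");
	return (int64_t)x0;
}

static bool smccc_arch_workaround_1_available(void)
{
	/* 1. SMCCC_VERSION may only be called if PSCI_FEATURES reports it. */
	if (smc1(PSCI_FEATURES, SMCCC_VERSION) == PSCI_E_NOT_SUPPORTED)
		return false;

	/* 2. The firmware must implement at least SMCCC v1.1 (0x10001). */
	if (smc1(SMCCC_VERSION, 0) < 0x10001)
		return false;

	/* 3. A non-negative result means SMCCC_ARCH_WORKAROUND_1 exists and
	 *    may be invoked on each CPU that requires the mitigation. */
	return smc1(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) >= 0;
}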
@@ -122,4 +122,6 @@ ERRATA_A53_836870 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1

+WORKAROUND_CVE_2017_5715 := 0
+
FIP_ALIGN := 512
@@ -29,6 +29,8 @@ ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ENABLE_SVE_FOR_NS := 0

+WORKAROUND_CVE_2017_5715 := 0
+
ARM_GIC_ARCH := 2
$(eval $(call add_define,ARM_GIC_ARCH))
...
@@ -61,6 +61,8 @@ $(eval $(call add_define,ARM_GIC_ARCH))
ERRATA_A53_826319 := 1
ERRATA_A53_836870 := 1

+WORKAROUND_CVE_2017_5715 := 0
+
# indicate the reset vector address can be programmed
PROGRAMMABLE_RESET_ADDRESS := 1
...
@@ -58,3 +58,5 @@ $(eval $(call add_define,PLAT_SKIP_OPTEE_S_EL1_INT_REGISTER))
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0

+WORKAROUND_CVE_2017_5715 := 0
@@ -57,3 +57,5 @@ $(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0

+WORKAROUND_CVE_2017_5715 := 0
@@ -64,6 +64,8 @@ ERRATA_A53_836870 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1

+WORKAROUND_CVE_2017_5715 := 0
+
# Disable the PSCI platform compatibility layer by default
ENABLE_PLAT_COMPAT := 0
...
@@ -14,6 +14,8 @@ override RESET_TO_BL31 := 1
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0

+WORKAROUND_CVE_2017_5715 := 0
+
ifdef ZYNQMP_ATF_MEM_BASE
$(eval $(call add_define,ZYNQMP_ATF_MEM_BASE))
...
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arm_arch_svc.h>
#include <debug.h>
#include <runtime_svc.h>
#include <smcc.h>
#include <smcc_helpers.h>
static int32_t smccc_version(void)
{
return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}
static int32_t smccc_arch_features(u_register_t arg)
{
switch (arg) {
case SMCCC_VERSION:
case SMCCC_ARCH_FEATURES:
return SMC_OK;
#if WORKAROUND_CVE_2017_5715
case SMCCC_ARCH_WORKAROUND_1:
return SMC_OK;
#endif
default:
return SMC_UNK;
}
}
/*
* Top-level Arm Architectural Service SMC handler.
*/
uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
u_register_t flags)
{
switch (smc_fid) {
case SMCCC_VERSION:
SMC_RET1(handle, smccc_version());
case SMCCC_ARCH_FEATURES:
SMC_RET1(handle, smccc_arch_features(x1));
#if WORKAROUND_CVE_2017_5715
case SMCCC_ARCH_WORKAROUND_1:
/*
* The workaround has already been applied on affected PEs
* during entry to EL3. On unaffected PEs, this function
* has no effect.
*/
SMC_RET0(handle);
#endif
default:
WARN("Unimplemented Arm Architecture Service Call: 0x%x \n",
smc_fid);
SMC_RET1(handle, SMC_UNK);
}
}
/* Register Arm Architectural Service Calls as runtime service */
DECLARE_RT_SVC(
arm_arch_svc,
OEN_ARM_START,
OEN_ARM_END,
SMC_TYPE_FAST,
NULL,
arm_arch_svc_smc_handler
);
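
For reference, a compact standalone sketch of what a caller observes from this handler for each function ID (SMC_OK and SMC_UNK are assumed here to be 0 and -1, and workaround_built mirrors the WORKAROUND_CVE_2017_5715 build option):

/* Observable X0 results of the Arm Architectural Service handler above.
 * Standalone sketch, not TF-A code. */
#include <stdint.h>

#define SMC_OK                  0
#define SMC_UNK                 -1
#define SMCCC_VERSION           0x80000000u
#define SMCCC_ARCH_FEATURES     0x80000001u
#define SMCCC_ARCH_WORKAROUND_1 0x80008000u

static int32_t expected_x0(uint32_t fid, uint32_t x1, int workaround_built)
{
	if (fid == SMCCC_VERSION)
		return 0x10001;			/* SMCCC v1.1 */

	if (fid == SMCCC_ARCH_FEATURES) {
		if (x1 == SMCCC_VERSION || x1 == SMCCC_ARCH_FEATURES)
			return SMC_OK;
		if (x1 == SMCCC_ARCH_WORKAROUND_1 && workaround_built)
			return SMC_OK;
		return SMC_UNK;
	}

	if (fid == SMCCC_ARCH_WORKAROUND_1 && workaround_built)
		return 0;	/* handler uses SMC_RET0: X0 is not a defined result */

	return SMC_UNK;		/* unimplemented: WARN + SMC_UNK */
}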