Commit 0c5e7d1c authored by Max Shvetsov, committed by Maksims Svecovs

feat(sve): enable SVE for the secure world



Enables SVE support for the secure world via the ENABLE_SVE_FOR_SWD build
option. ENABLE_SVE_FOR_SWD defaults to 0 and has to be explicitly set by the
platform. SVE is configured during initial setup, and the EL3 context
save/restore routines are then used to switch between the SVE configurations
of the different contexts.
The reset value of CPTR_EL3 is changed to be the most restrictive by default.
Signed-off-by: Max Shvetsov <maksims.svecovs@arm.com>
Change-Id: I889fbbc2e435435d66779b73a2d90d1188bf4116
parent a5394205
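Before the diff, a minimal sketch of the flow described above, using names taken from the patch below (an illustration of the intent, not code added by the commit):

    #if ENABLE_SVE_FOR_SWD
    	if (security_state == SECURE) {
    		sve_enable(ctx);	/* stores CPTR_EL3/ZCR_EL3 values in the context */
    	}
    #endif

On el3_exit, the saved CPTR_EL3 (and ZCR_EL3, when SVE is enabled for that world) is restored, so each world runs with its own SVE configuration.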
@@ -913,6 +913,7 @@ $(eval $(call assert_booleans,\
ENABLE_RUNTIME_INSTRUMENTATION \
ENABLE_SPE_FOR_LOWER_ELS \
ENABLE_SVE_FOR_NS \
ENABLE_SVE_FOR_SWD \
ERROR_DEPRECATED \
FAULT_INJECTION_SUPPORT \
GENERATE_COT \
@@ -1006,6 +1007,7 @@ $(eval $(call add_defines,\
ENABLE_RUNTIME_INSTRUMENTATION \
ENABLE_SPE_FOR_LOWER_ELS \
ENABLE_SVE_FOR_NS \
ENABLE_SVE_FOR_SWD \
ENCRYPT_BL31 \
ENCRYPT_BL32 \
ERROR_DEPRECATED \
@@ -278,7 +278,8 @@ Common build options
- ``ENABLE_SVE_FOR_NS``: Boolean option to enable Scalable Vector Extension
(SVE) for the Non-secure world only. SVE is an optional architectural feature
for AArch64. Note that when SVE is enabled for the Non-secure world, access
to SIMD and floating-point functionality from the Secure world is disabled.
to SIMD and floating-point functionality from the Secure world is disabled by
default and controlled with ``ENABLE_SVE_FOR_SWD``.
This is to avoid corruption of the Non-secure world data in the Z-registers
which are aliased by the SIMD and FP registers. The build option is not
compatible with the ``CTX_INCLUDE_FPREGS`` build option, and will raise an
@@ -286,6 +287,11 @@ Common build options
The default is 1 but is automatically disabled when the target
architecture is AArch32.
- ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE for the Secure world.
SVE is an optional architectural feature for AArch64. Note that this option
requires ``ENABLE_SVE_FOR_NS`` to be enabled. The default is 0 and it is
automatically disabled when the target architecture is AArch32 (see the
summary sketch after this hunk).
- ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
checks in GCC. Allowed values are "all", "strong", "default" and "none". The
default value is set to "none". "strong" is the recommended stack protection
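For illustration, the resulting per-world CPTR_EL3 settings, derived from the sve_enable() and CPTR_EL3 reset-value changes later in this patch (a summary sketch, not normative documentation):

    /*
     * ENABLE_SVE_FOR_NS=1, ENABLE_SVE_FOR_SWD=0:
     *   Non-secure context: CPTR_EL3.EZ=1, CPTR_EL3.TFP=0  -> SVE/SIMD/FP usable
     *   Secure context:     CPTR_EL3.EZ=0, CPTR_EL3.TFP=1  -> SVE/SIMD/FP trapped
     *
     * ENABLE_SVE_FOR_NS=1, ENABLE_SVE_FOR_SWD=1:
     *   Both contexts:      CPTR_EL3.EZ=1, CPTR_EL3.TFP=0  -> SVE/SIMD/FP usable
     */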
@@ -170,6 +170,7 @@
#define ID_AA64PFR0_GIC_MASK ULL(0xf)
#define ID_AA64PFR0_SVE_SHIFT U(32)
#define ID_AA64PFR0_SVE_MASK ULL(0xf)
#define ID_AA64PFR0_SVE_LENGTH U(4)
#define ID_AA64PFR0_SEL2_SHIFT U(36)
#define ID_AA64PFR0_SEL2_MASK ULL(0xf)
#define ID_AA64PFR0_MPAM_SHIFT U(40)
@@ -529,7 +530,7 @@
#define TTA_BIT (U(1) << 20)
#define TFP_BIT (U(1) << 10)
#define CPTR_EZ_BIT (U(1) << 8)
#define CPTR_EL3_RESET_VAL U(0x0)
#define CPTR_EL3_RESET_VAL (TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT & ~(CPTR_EZ_BIT))
/* CPTR_EL2 definitions */
#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
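For reference, the new reset value expands as follows (assuming the usual CPTR_EL3 bit positions for TCPAC and TAM, which are not shown in this hunk):

    /*
     * '&' binds tighter than '|' in C, so the definition groups as
     *     TCPAC_BIT | TAM_BIT | TTA_BIT | (TFP_BIT & ~CPTR_EZ_BIT)
     * TFP_BIT (bit 10) and CPTR_EZ_BIT (bit 8) do not overlap, so the value is
     * the same as the fully parenthesised form:
     *     (1 << 31) | (1 << 30) | (1 << 20) | (1 << 10) = 0xC0100400
     * i.e. TCPAC, TAM, TTA and TFP are set (trap) and EZ is clear (SVE trapped).
     */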
@@ -185,7 +185,14 @@
* CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
* by Advanced SIMD, floating-point or SVE instructions (if implemented)
* do not trap to EL3.
*
* CPTR_EL3.TAM: Set to one so that Activity Monitor access is
* trapped to EL3 by default.
*
* CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
* to EL3 by default.
*/
mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
msr cptr_el3, x0
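The mov_imm line keeps the previous untrapped FP/SIMD behaviour despite the more restrictive reset value; roughly (a sketch based on the defines above):

    /*
     * CPTR_EL3_RESET_VAL sets TCPAC, TAM, TTA and TFP and leaves EZ clear.
     * Masking out TCPAC, TTA and TFP here leaves only TAM set, so the common
     * init still avoids trapping FP/SIMD accesses while keeping Activity
     * Monitor accesses and SVE trapped by default:
     *     CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT) == TAM_BIT
     */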
@@ -61,7 +61,9 @@
#define CTX_ELR_EL3 U(0x20)
#define CTX_PMCR_EL0 U(0x28)
#define CTX_IS_IN_EL3 U(0x30)
#define CTX_EL3STATE_END U(0x40) /* Align to the next 16 byte boundary */
#define CTX_CPTR_EL3 U(0x38)
#define CTX_ZCR_EL3 U(0x40)
#define CTX_EL3STATE_END U(0x50) /* Align to the next 16 byte boundary */
/*******************************************************************************
* Constants that allow assembler code to access members of and the
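A short note on the new layout (a sketch based on the offsets above and the el3_exit change later in this patch):

    /*
     * CTX_CPTR_EL3 (0x38) and CTX_ZCR_EL3 (0x40) are adjacent 8-byte slots, so
     * el3_exit can load both with a single
     *     ldp x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
     * The end marker grows from 0x40 to 0x50: the last slot ends at 0x48, and
     * rounding up to the next 16-byte boundary gives 0x50.
     */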
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,9 +7,8 @@
#ifndef SVE_H
#define SVE_H
#include <stdbool.h>
#include <context.h>
bool sve_supported(void);
void sve_enable(bool el2_unused);
void sve_enable(cpu_context_t *context);
#endif /* SVE_H */
@@ -901,6 +901,29 @@ func el3_exit
msr spsr_el3, x16
msr elr_el3, x17
#if IMAGE_BL31
/* ----------------------------------------------------------
* Restore CPTR_EL3, ZCR_EL3 for SVE support.
* If SVE is not supported - skip the restoration.
* ZCR is only restored if SVE is supported and enabled.
* Synchronization is required before zcr_el3 is addressed.
* ----------------------------------------------------------
*/
mrs x17, id_aa64pfr0_el1
ubfx x17, x17, ID_AA64PFR0_SVE_SHIFT, ID_AA64PFR0_SVE_LENGTH
cbz x17, sve_not_enabled
ldp x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
msr cptr_el3, x19
ands x19, x19, #CPTR_EZ_BIT
beq sve_not_enabled
isb
msr S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
#endif
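In C-like pseudocode, the restore sequence above amounts to the following (a sketch; the conditional ZCR_EL3 write reflects the assumption that CPTR_EL3.EZ == 0 traps ZCR_EL3 accesses even from EL3):

    /*
     * if (ID_AA64PFR0_EL1.SVE != 0) {
     *     cptr = ctx_el3state[CTX_CPTR_EL3];   // always restore CPTR_EL3
     *     write_cptr_el3(cptr);
     *     if (cptr & CPTR_EZ_BIT) {            // SVE enabled for this world
     *         isb();                           // CPTR write must take effect first
     *         write_zcr_el3(ctx_el3state[CTX_ZCR_EL3]);
     *     }
     * }
     */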
#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
/* ----------------------------------------------------------
* Restore mitigation state as it was on entry to EL3
@@ -178,6 +178,18 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
* indicated by the interrupt routing model for BL31.
*/
scr_el3 |= get_scr_el3_from_routing_model(security_state);
#if ENABLE_SVE_FOR_NS
if (security_state == NON_SECURE) {
sve_enable(ctx);
}
#endif
#if ENABLE_SVE_FOR_SWD
if (security_state == SECURE) {
sve_enable(ctx);
}
#endif
#endif
/*
@@ -334,10 +346,6 @@ static void enable_extensions_nonsecure(bool el2_unused)
amu_enable(el2_unused);
#endif
#if ENABLE_SVE_FOR_NS
sve_enable(el2_unused);
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
mpam_enable(el2_unused);
#endif
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,7 +11,13 @@
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/sve.h>
bool sve_supported(void)
/*
* Converts SVE vector size restriction in bits to LEN according to the ZCR_EL3 documentation.
* VECTOR_SIZE = (LEN+1) * 128
*/
#define CONVERT_SVE_LENGTH(x) (((x / 128) - 1))
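A quick worked example of the conversion, using the 512 passed in sve_enable() below:

    /*
     * CONVERT_SVE_LENGTH(512) = (512 / 128) - 1 = 3, so ZCR_EL3.LEN = 3 and the
     * maximum permitted vector length is (3 + 1) * 128 = 512 bits.
     */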
static bool sve_supported(void)
{
uint64_t features;
@@ -19,113 +25,19 @@ bool sve_supported(void)
return (features & ID_AA64PFR0_SVE_MASK) == 1U;
}
static void *disable_sve_hook(const void *arg)
void sve_enable(cpu_context_t *context)
{
uint64_t cptr;
if (!sve_supported())
return (void *)-1;
/*
* Disable SVE, SIMD and FP access for the Secure world.
* As the SIMD/FP registers are part of the SVE Z-registers, any
* use of SIMD/FP functionality will corrupt the SVE registers.
* Therefore it is necessary to prevent use of SIMD/FP support
* in the Secure world as well as SVE functionality.
*/
cptr = read_cptr_el3();
cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
write_cptr_el3(cptr);
/*
* No explicit ISB required here as ERET to switch to Secure
* world covers it
*/
return (void *)0;
}
static void *enable_sve_hook(const void *arg)
{
uint64_t cptr;
if (!sve_supported())
return (void *)-1;
/*
* Enable SVE, SIMD and FP access for the Non-secure world.
*/
cptr = read_cptr_el3();
cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
write_cptr_el3(cptr);
/*
* No explicit ISB required here as ERET to switch to Non-secure
* world covers it
*/
return (void *)0;
}
void sve_enable(bool el2_unused)
{
uint64_t cptr;
if (!sve_supported())
if (!sve_supported()) {
return;
}
#if CTX_INCLUDE_FPREGS
/*
* CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
*/
assert(0);
#endif
/*
* Update CPTR_EL3 to enable access to SVE functionality for the
* Non-secure world.
* NOTE - assumed that CPTR_EL3.TFP is set to allow access to
* the SIMD, floating-point and SVE support.
*
* CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
* in the Non-secure world.
*/
cptr = read_cptr_el3();
cptr |= CPTR_EZ_BIT;
write_cptr_el3(cptr);
/*
* Need explicit ISB here to guarantee that update to ZCR_ELx
* and CPTR_EL2.TZ do not result in trap to EL3.
*/
isb();
/*
* Ensure lower ELs have access to full vector length.
*/
write_zcr_el3(ZCR_EL3_LEN_MASK);
u_register_t cptr_el3 = read_cptr_el3();
if (el2_unused) {
/*
* Update CPTR_EL2 to enable access to SVE functionality
* for Non-secure world, EL2 and Non-secure EL1 and EL0.
* NOTE - assumed that CPTR_EL2.TFP is set to allow
* access to the SIMD, floating-point and SVE support.
*
* CPTR_EL2.TZ: Set to 0 to enable access to SVE support
* for EL2 and Non-secure EL1 and EL0.
*/
cptr = read_cptr_el2();
cptr &= ~(CPTR_EL2_TZ_BIT);
write_cptr_el2(cptr);
/* Enable access to SVE functionality for all ELs. */
cptr_el3 = (cptr_el3 | CPTR_EZ_BIT) & ~(TFP_BIT);
write_ctx_reg(get_el3state_ctx(context), CTX_CPTR_EL3, cptr_el3);
/*
* Ensure lower ELs have access to full vector length.
*/
write_zcr_el2(ZCR_EL2_LEN_MASK);
}
/*
* No explicit ISB required here as ERET to switch to
* Non-secure world covers it.
*/
/* Restrict the maximum SVE vector length: vector length = (LEN + 1) * 128 bits. */
write_ctx_reg(get_el3state_ctx(context), CTX_ZCR_EL3,
(ZCR_EL3_LEN_MASK & CONVERT_SVE_LENGTH(512)));
}
SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
SUBSCRIBE_TO_EVENT(cm_entering_normal_world, enable_sve_hook);
@@ -299,13 +299,15 @@ CTX_INCLUDE_MTE_REGS := 0
ENABLE_AMU := 0
AMU_RESTRICT_COUNTERS := 0
# By default, enable Scalable Vector Extension if implemented for Non-secure
# By default, enable the Scalable Vector Extension, if implemented, for
# Non-secure lower ELs only
# Note SVE is only supported on AArch64 - therefore do not enable in AArch32
ifneq (${ARCH},aarch32)
ENABLE_SVE_FOR_NS := 1
ENABLE_SVE_FOR_SWD := 0
else
override ENABLE_SVE_FOR_NS := 0
override ENABLE_SVE_FOR_SWD := 0
endif
SANITIZE_UB := off