Commit 1a853370 authored by David Cunado

Enable SVE for Non-secure world



This patch adds a new build option, ENABLE_SVE_FOR_NS, which, when set
to 1, causes EL3 to check whether the Scalable Vector Extension (SVE)
is implemented when entering and exiting the Non-secure world.

If SVE is implemented, EL3 will do the following:

- Entry to Non-secure world: SIMD, FP and SVE functionality is enabled.

- Exit from Non-secure world: SIMD, FP and SVE functionality is
  disabled. Because the SIMD and FP registers are part of the SVE
  Z-registers, any use of SIMD / FP functionality would corrupt the
  SVE registers (see the sketch after this list).
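
In terms of the EL3 controls involved, the entry/exit behaviour above
amounts to toggling the CPTR_EL3.TFP and CPTR_EL3.EZ bits. A minimal
sketch of that logic only, using the bit positions that TFP_BIT and
CPTR_EZ_BIT take in the arch.h hunk of this patch; the helper names are
illustrative and the actual hooks are in the new lib/extensions/sve/sve.c
further down:

    #include <stdint.h>

    /* CPTR_EL3.EZ is bit 8, CPTR_EL3.TFP is bit 10 (cf. the arch.h hunk). */
    #define CPTR_EZ_BIT  (1ULL << 8)   /* 1: SVE instructions do not trap to EL3 */
    #define TFP_BIT      (1ULL << 10)  /* 1: SIMD/FP (and SVE) accesses trap to EL3 */

    /* Entering the Non-secure world: enable SVE, SIMD and FP. */
    uint64_t cptr_el3_for_ns_entry(uint64_t cptr_el3)
    {
            return (cptr_el3 | CPTR_EZ_BIT) & ~TFP_BIT;
    }

    /* Leaving the Non-secure world: trap SVE, SIMD and FP so that Secure
     * world execution cannot clobber the live Non-secure Z-registers. */
    uint64_t cptr_el3_for_ns_exit(uint64_t cptr_el3)
    {
            return (cptr_el3 | TFP_BIT) & ~CPTR_EZ_BIT;
    }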

The build option defaults to 1. SVE functionality is only supported on
AArch64, so the build option is forced to zero when the target
architecture is AArch32.

This build option is not compatible with CTX_INCLUDE_FPREGS: saving and
restoring only the 128-bit SIMD / FP view of the registers cannot
preserve the wider SVE state, so an assertion will be raised on
platforms where SVE is implemented and both ENABLE_SVE_FOR_NS and
CTX_INCLUDE_FPREGS are set to 1.

Also note that this change prevents Secure world use of FP and SIMD
registers on SVE-enabled platforms. Existing Secure-EL1 Payloads will
not work on such platforms unless ENABLE_SVE_FOR_NS is set to 0.

Additionally, on the first entry into the Non-secure world the SVE
functionality is enabled and the SVE Z-register length is set to the
maximum size allowed by the architecture. This includes the use case
where EL2 is implemented but not used.
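
For reference, ZCR_ELx.LEN is a 4-bit field and the requested vector
length is (LEN + 1) * 128 bits; the hardware constrains the effective
length to the maximum it implements. Writing ZCR_EL3_LEN_MASK (0xf), as
the new sve.c does, therefore requests the architectural maximum of
2048 bits. A small illustrative helper, not part of the patch:

    #include <stdint.h>

    /*
     * ZCR_ELx.LEN occupies bits [3:0]. The requested SVE vector length
     * is (LEN + 1) * 128 bits; hardware clamps the effective length to
     * the implemented maximum, so LEN = 0xf simply asks for the largest
     * supported vector length (at most 2048 bits).
     */
    static inline unsigned int sve_requested_vl_bits(uint64_t zcr_len)
    {
            return (((unsigned int)zcr_len & 0xfU) + 1U) * 128U;
    }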

Change-Id: Ie2d733ddaba0b9bef1d7c9765503155188fe7dae
Signed-off-by: David Cunado <david.cunado@arm.com>
parent 3642ca95
@@ -463,6 +463,7 @@ $(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
$(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
$(eval $(call assert_boolean,ENABLE_SPE_FOR_LOWER_ELS))
$(eval $(call assert_boolean,ENABLE_SVE_FOR_NS))
$(eval $(call assert_boolean,ERROR_DEPRECATED))
$(eval $(call assert_boolean,GENERATE_COT))
$(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
@@ -503,6 +504,7 @@ $(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT))
$(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
$(eval $(call add_define,ENABLE_SPE_FOR_LOWER_ELS))
$(eval $(call add_define,ENABLE_SVE_FOR_NS))
$(eval $(call add_define,ERROR_DEPRECATED))
$(eval $(call add_define,GICV2_G0_FOR_EL3))
$(eval $(call add_define,HW_ASSISTED_COHERENCY))
@@ -54,6 +54,10 @@ ifeq (${ENABLE_AMU},1)
BL31_SOURCES += lib/extensions/amu/aarch64/amu.c
endif

ifeq (${ENABLE_SVE_FOR_NS},1)
BL31_SOURCES += lib/extensions/sve/sve.c
endif

BL31_LINKERFILE := bl31/bl31.ld.S

# Flag used to indicate if Crash reporting via console should be included
@@ -354,6 +354,17 @@ Common build options
  The default is 1 but is automatically disabled when the target architecture
  is AArch32.

- ``ENABLE_SVE_FOR_NS``: Boolean option to enable Scalable Vector Extension
  (SVE) for the Non-secure world only. SVE is an optional architectural feature
  for AArch64. Note that when SVE is enabled for the Non-secure world, access
  to SIMD and floating-point functionality from the Secure world is disabled.
  This is to avoid corruption of the Non-secure world data in the Z-registers,
  which are aliased by the SIMD and FP registers. The build option is not
  compatible with the ``CTX_INCLUDE_FPREGS`` build option, and will raise an
  assert on platforms where SVE is implemented and ``ENABLE_SVE_FOR_NS`` is
  set to 1. The default is 1 but is automatically disabled when the target
  architecture is AArch32.

- ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
  checks in GCC. Allowed values are "all", "strong" and "0" (default).
  "strong" is the recommended stack protection level if this feature is
@@ -127,9 +127,9 @@
 * CPTR_EL3.TTA: Set to zero so that System register accesses to the
 * trace registers do not trap to EL3.
 *
 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
 * by Advanced SIMD, floating-point or SVE instructions (if implemented)
 * do not trap to EL3.
 */
mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
msr cptr_el3, x0
@@ -114,6 +114,9 @@
#define ID_AA64PFR0_AMU_LENGTH U(4)
#define ID_AA64PFR0_AMU_MASK U(0xf)
#define ID_AA64PFR0_ELX_MASK U(0xf)
#define ID_AA64PFR0_SVE_SHIFT U(32)
#define ID_AA64PFR0_SVE_MASK U(0xf)
#define ID_AA64PFR0_SVE_LENGTH U(4)

/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT U(32)
@@ -301,6 +304,7 @@
#define TAM_BIT (U(1) << 30)
#define TTA_BIT (U(1) << 20)
#define TFP_BIT (U(1) << 10)
#define CPTR_EZ_BIT (U(1) << 8)
#define CPTR_EL3_RESET_VAL U(0x0)

/* CPTR_EL2 definitions */
@@ -309,6 +313,7 @@
#define CPTR_EL2_TAM_BIT (U(1) << 30)
#define CPTR_EL2_TTA_BIT (U(1) << 20)
#define CPTR_EL2_TFP_BIT (U(1) << 10)
#define CPTR_EL2_TZ_BIT (U(1) << 8)
#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1

/* CPSR/SPSR definitions */
@@ -555,6 +560,18 @@
#define PMCR_EL0_X_BIT (U(1) << 4)
#define PMCR_EL0_D_BIT (U(1) << 3)

/*******************************************************************************
 * Definitions for system register interface to SVE
 ******************************************************************************/
#define ZCR_EL3 S3_6_C1_C2_0
#define ZCR_EL2 S3_4_C1_C2_0

/* ZCR_EL3 definitions */
#define ZCR_EL3_LEN_MASK U(0xf)

/* ZCR_EL2 definitions */
#define ZCR_EL2_LEN_MASK U(0xf)

/*******************************************************************************
 * Definitions of MAIR encodings for device and normal memory
 ******************************************************************************/
@@ -329,6 +329,9 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)

DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)

#define IS_IN_EL(x) \
        (GET_EL(read_CurrentEl()) == MODE_EL##x)
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __SVE_H__
#define __SVE_H__

void sve_enable(int el2_unused);

#endif /* __SVE_H__ */
@@ -18,6 +18,7 @@
#include <smcc_helpers.h>
#include <spe.h>
#include <string.h>
#include <sve.h>
#include <utils.h>

@@ -225,6 +226,10 @@ static void enable_extensions_nonsecure(int el2_unused)
#if ENABLE_AMU
        amu_enable(el2_unused);
#endif

#if ENABLE_SVE_FOR_NS
        sve_enable(el2_unused);
#endif
#endif
}
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <pubsub.h>
#include <sve.h>

static void *disable_sve_hook(const void *arg)
{
        uint64_t features;

        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
        if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
                uint64_t cptr;

                /*
                 * Disable SVE, SIMD and FP access for the Secure world.
                 * As the SIMD/FP registers are part of the SVE Z-registers, any
                 * use of SIMD/FP functionality will corrupt the SVE registers.
                 * Therefore it is necessary to prevent use of SIMD/FP support
                 * in the Secure world as well as SVE functionality.
                 */
                cptr = read_cptr_el3();
                cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
                write_cptr_el3(cptr);

                /*
                 * No explicit ISB required here as ERET to switch to Secure
                 * world covers it
                 */
        }
        return 0;
}

static void *enable_sve_hook(const void *arg)
{
        uint64_t features;

        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
        if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
                uint64_t cptr;

                /*
                 * Enable SVE, SIMD and FP access for the Non-secure world.
                 */
                cptr = read_cptr_el3();
                cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
                write_cptr_el3(cptr);

                /*
                 * No explicit ISB required here as ERET to switch to Non-secure
                 * world covers it
                 */
        }
        return 0;
}

void sve_enable(int el2_unused)
{
        uint64_t features;

        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
        if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
                uint64_t cptr;
#if CTX_INCLUDE_FPREGS
                /*
                 * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
                 */
                assert(0);
#endif
                /*
                 * Update CPTR_EL3 to enable access to SVE functionality for the
                 * Non-secure world.
                 * NOTE - assumed that CPTR_EL3.TFP is set to allow access to
                 * the SIMD, floating-point and SVE support.
                 *
                 * CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
                 * in the Non-secure world.
                 */
                cptr = read_cptr_el3();
                cptr |= CPTR_EZ_BIT;
                write_cptr_el3(cptr);

                /*
                 * Need explicit ISB here to guarantee that update to ZCR_ELx
                 * and CPTR_EL2.TZ do not result in trap to EL3.
                 */
                isb();

                /*
                 * Ensure lower ELs have access to full vector length.
                 */
                write_zcr_el3(ZCR_EL3_LEN_MASK);

                if (el2_unused) {
                        /*
                         * Update CPTR_EL2 to enable access to SVE functionality
                         * for Non-secure world, EL2 and Non-secure EL1 and EL0.
                         * NOTE - assumed that CPTR_EL2.TFP is set to allow
                         * access to the SIMD, floating-point and SVE support.
                         *
                         * CPTR_EL2.TZ: Set to 0 to enable access to SVE support
                         * for EL2 and Non-secure EL1 and EL0.
                         */
                        cptr = read_cptr_el2();
                        cptr &= ~(CPTR_EL2_TZ_BIT);
                        write_cptr_el2(cptr);

                        /*
                         * Ensure lower ELs have access to full vector length.
                         */
                        write_zcr_el2(ZCR_EL2_LEN_MASK);
                }
                /*
                 * No explicit ISB required here as ERET to switch to
                 * Non-secure world covers it.
                 */
        }
}

SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
SUBSCRIBE_TO_EVENT(cm_entering_normal_world, enable_sve_hook);
@@ -158,3 +158,12 @@ ifeq (${ARCH},aarch32)
endif

ENABLE_AMU := 0

# By default, enable Scalable Vector Extension if implemented for Non-secure
# lower ELs
# Note SVE is only supported on AArch64 - therefore do not enable in AArch32
ifneq (${ARCH},aarch32)
ENABLE_SVE_FOR_NS := 1
else
override ENABLE_SVE_FOR_NS := 0
endif