Unverified commit ec04e0f4, authored by davidcunado-arm, committed by GitHub

Merge pull request #1162 from dp-arm/spe-rework

Move SPE code to lib/extensions
parents 203444c5 281a08cc
@@ -189,6 +189,7 @@ INCLUDES += -Iinclude/bl1 \
 	-Iinclude/lib/cpus/${ARCH} \
 	-Iinclude/lib/el3_runtime \
 	-Iinclude/lib/el3_runtime/${ARCH} \
+	-Iinclude/lib/extensions \
 	-Iinclude/lib/pmf \
 	-Iinclude/lib/psci \
 	-Iinclude/lib/xlat_tables \
@@ -46,6 +46,10 @@ BL31_SOURCES += services/std_svc/sdei/sdei_event.c \
 	services/std_svc/sdei/sdei_state.c
 endif

+ifeq (${ENABLE_SPE_FOR_LOWER_ELS},1)
+BL31_SOURCES += lib/extensions/spe/spe.c
+endif
+
 BL31_LINKERFILE := bl31/bl31.ld.S

 # Flag used to indicate if Crash reporting via console should be included
@@ -344,9 +344,9 @@ Common build options
   the ``ENABLE_PMF`` build option as well. Default is 0.

 -  ``ENABLE_SPE_FOR_LOWER_ELS`` : Boolean option to enable Statistical Profiling
-   extensions. This is an optional architectural feature available only for
-   AArch64 8.2 onwards. This option defaults to 1 but is automatically
-   disabled when the target architecture is AArch32 or AArch64 8.0/8.1.
+   extensions. This is an optional architectural feature for AArch64.
+   The default is 1 but is automatically disabled when the target architecture
+   is AArch32.

 -  ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
    checks in GCC. Allowed values are "all", "strong" and "0" (default).
@@ -95,10 +95,6 @@
 	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
 	 * privileged debug from S-EL1.
 	 *
-	 * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in non-secure state and
-	 * disabled in secure state. Accesses to SPE registers at SEL1 generate
-	 * trap exceptions to EL3.
-	 *
 	 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL2 System register
 	 * access to the powerdown debug registers do not trap to EL3.
 	 *
@@ -112,19 +108,6 @@
 	 */
 	mov_imm	x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
 		& ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
-
-#if ENABLE_SPE_FOR_LOWER_ELS
-	/* Detect if SPE is implemented */
-	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
-	cmp	x1, #0x1
-	b.ne	1f
-
-	/* Enable SPE for use by normal world */
-	orr	x0, x0, #MDCR_NSPB(MDCR_NSPB_EL1)
-1:
-#endif
-
 	msr	mdcr_el3, x0

 	/* ---------------------------------------------------------------------
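Note: the reset-time probe deleted above is not lost; the same check moves into C in the new lib/extensions/spe/spe.c added later in this diff. As an illustrative sketch only (field definitions as in TF-A's arch.h), the probe amounts to reading ID_AA64DFR0_EL1.PMSVer, bits [35:32], which is 0b0001 when SPEv1 is implemented:

#include <stdint.h>

#define ID_AA64DFR0_PMS_SHIFT	32	/* as in TF-A's arch.h */
#define ID_AA64DFR0_PMS_MASK	0xfULL

static int spe_implemented(uint64_t id_aa64dfr0_el1)
{
	return ((id_aa64dfr0_el1 >> ID_AA64DFR0_PMS_SHIFT) &
		ID_AA64DFR0_PMS_MASK) == 1;
}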
@@ -604,4 +604,9 @@
 #define PAR_ADDR_SHIFT	12
 #define PAR_ADDR_MASK	(BIT(40) - 1) /* 40-bits-wide page address */

+/*******************************************************************************
+ * Definitions for system register interface to SPE
+ ******************************************************************************/
+#define PMBLIMITR_EL1	S3_0_C9_C10_0
+
 #endif /* __ARCH_H__ */
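Note: S3_0_C9_C10_0 is the generic encoding-based name for op0=3, op1=0, CRn=9, CRm=10, op2=0, which is PMBLIMITR_EL1; spelling it numerically lets assemblers that predate the v8.2 profiling extension still encode accesses. A hypothetical accessor, for illustration:

static inline unsigned long long read_pmblimitr_sketch(void)
{
	unsigned long long v;

	/* Numeric form of "mrs %0, pmblimitr_el1" */
	__asm__ volatile("mrs %0, S3_0_C9_C10_0" : "=r"(v));
	return v;
}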
@@ -197,6 +197,7 @@ DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
 DEFINE_SYSOP_TYPE_FUNC(dmb, st)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
@@ -301,6 +302,7 @@ DEFINE_SYSREG_READ_FUNC(isr_el1)
 DEFINE_SYSREG_READ_FUNC(ctr_el0)

 DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
 DEFINE_SYSREG_RW_FUNCS(hstr_el2)
 DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
 DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
@@ -320,6 +322,7 @@ DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)

 #define IS_IN_EL(x) \
 	(GET_EL(read_CurrentEl()) == MODE_EL##x)
@@ -313,7 +313,6 @@ CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
  * Function prototypes
  ******************************************************************************/
 void el1_sysregs_context_save(el1_sys_regs_t *regs);
-void el1_sysregs_context_save_post_ops(void);
 void el1_sysregs_context_restore(el1_sys_regs_t *regs);
 #if CTX_INCLUDE_FPREGS
 void fpregs_context_save(fp_regs_t *regs);
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPE_H__
+#define __SPE_H__
+
+void spe_enable(int el2_unused);
+void spe_disable(void);
+
+#endif /* __SPE_H__ */
@@ -227,7 +227,4 @@ int arm_execution_state_switch(unsigned int smc_fid,
 			uint32_t cookie_lo,
 			void *handle);

-/* Disable Statistical Profiling Extensions helper */
-void arm_disable_spe(void);
-
 #endif /* __PLAT_ARM_H__ */
@@ -124,6 +124,17 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
 	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
 }

+/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL32
+#endif
+}
+
 /*******************************************************************************
  * The following function initializes the cpu_context for a CPU specified by
  * its `cpu_idx` for first use, and sets the initial entrypoint state as
@@ -161,6 +172,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t hsctlr, scr;
 	cpu_context_t *ctx = cm_get_context(security_state);
+	int el2_unused = 0;

 	assert(ctx);
@@ -185,6 +197,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			isb();
 		} else if (read_id_pfr1() &
 			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+			el2_unused = 1;
+
 			/*
 			 * Set the NS bit to access NS copies of certain banked
 			 * registers
@@ -283,5 +297,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			write_scr(read_scr() & ~SCR_NS_BIT);
 			isb();
 		}
+		enable_extensions_nonsecure(el2_unused);
 	}
 }
@@ -9,7 +9,6 @@
 #include <context.h>

 	.global	el1_sysregs_context_save
-	.global	el1_sysregs_context_save_post_ops
 	.global	el1_sysregs_context_restore
 #if CTX_INCLUDE_FPREGS
 	.global	fpregs_context_save
@@ -109,36 +108,6 @@ func el1_sysregs_context_save
 	ret
 endfunc el1_sysregs_context_save

-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
- * to do post operations after saving the EL1 system
- * register context.
- * -----------------------------------------------------
- */
-func el1_sysregs_context_save_post_ops
-#if ENABLE_SPE_FOR_LOWER_ELS
-	/* Detect if SPE is implemented */
-	mrs	x9, id_aa64dfr0_el1
-	ubfx	x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
-	cmp	x9, #0x1
-	b.ne	1f
-
-	/*
-	 * Before switching from normal world to secure world
-	 * the profiling buffers need to be drained out to memory. This is
-	 * required to avoid an invalid memory access when TTBR is switched
-	 * for entry to SEL1.
-	 */
-	.arch	armv8.2-a+profile
-	psb	csync
-	dsb	nsh
-	.arch	armv8-a
-1:
-#endif
-	ret
-endfunc el1_sysregs_context_save_post_ops
-
 /* -----------------------------------------------------
  * The following function strictly follows the AArch64
  * PCS to use x9-x17 (temporary caller-saved registers)
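Note: the drain sequence removed above needed `.arch armv8.2-a+profile` toggling because `psb csync` is a v8.2 mnemonic. The replacement C code (in spe.c below) avoids that by emitting the instruction through its hint encoding: PSB CSYNC occupies hint slot 17 (CRm:op2 = 0b0010:001), and unallocated hints execute as NOP. A sketch, assuming GCC-style inline assembly:

/* Equivalent of "psb csync": hint #17 is its architected encoding */
#define psb_csync()	__asm__ volatile("hint #17")
#define dsb_nsh()	__asm__ volatile("dsb nsh" : : : "memory")

static void drain_profiling_buffer(void)
{
	psb_csync();	/* push sampled records out to the buffer */
	dsb_nsh();	/* complete the buffer writes before the world switch */
}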
@@ -15,6 +15,7 @@
 #include <platform_def.h>
 #include <pubsub_events.h>
 #include <smcc_helpers.h>
+#include <spe.h>
 #include <string.h>
 #include <utils.h>
@@ -208,6 +209,20 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
 	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
 }

+/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL31
+#if ENABLE_SPE_FOR_LOWER_ELS
+	spe_enable(el2_unused);
+#endif
+#endif
+}
+
 /*******************************************************************************
  * The following function initializes the cpu_context for a CPU specified by
  * its `cpu_idx` for first use, and sets the initial entrypoint state as
@@ -245,6 +260,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t sctlr_elx, scr_el3, mdcr_el2;
 	cpu_context_t *ctx = cm_get_context(security_state);
+	int el2_unused = 0;

 	assert(ctx);
@@ -258,6 +274,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			sctlr_elx |= SCTLR_EL2_RES1;
 			write_sctlr_el2(sctlr_elx);
 		} else if (EL_IMPLEMENTED(2)) {
+			el2_unused = 1;
+
 			/*
 			 * EL2 present but unused, need to disable safely.
 			 * SCTLR_EL2 can be ignored in this case.
@@ -340,13 +358,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			 * relying on hw. Some fields are architecturally
 			 * UNKNOWN on reset.
 			 *
-			 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
-			 * profiling controls to EL2.
-			 *
-			 * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
-			 * state. Accesses to profiling buffer controls at
-			 * non-secure EL1 are not trapped to EL2.
-			 *
 			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
 			 * EL1 System register accesses to the Debug ROM
 			 * registers are not trapped to EL2.
@@ -383,22 +394,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
 				| MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
 				| MDCR_EL2_TPMCR_BIT));

-#if ENABLE_SPE_FOR_LOWER_ELS
-			uint64_t id_aa64dfr0_el1;
-
-			/* Detect if SPE is implemented */
-			id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
-				ID_AA64DFR0_PMS_SHIFT;
-			if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
-				/*
-				 * Make sure traps to EL2 are not generated if
-				 * EL2 is implemented but not used.
-				 */
-				mdcr_el2 &= ~MDCR_EL2_TPMS;
-				mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
-			}
-#endif
-
 			write_mdcr_el2(mdcr_el2);

 			/*
@@ -420,6 +415,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
 						~(CNTHP_CTL_ENABLE_BIT));
 		}
+		enable_extensions_nonsecure(el2_unused);
 	}

 	cm_el1_sysregs_context_restore(security_state);
@@ -439,7 +435,6 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
 	assert(ctx);

 	el1_sysregs_context_save(get_sysregs_ctx(ctx));
-	el1_sysregs_context_save_post_ops();

 #if IMAGE_BL31
 	if (security_state == SECURE)
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <pubsub.h>
+
+/*
+ * The assembler does not yet understand the psb csync mnemonic
+ * so use the equivalent hint instruction.
+ */
+#define psb_csync()	asm volatile("hint #17")
+
+void spe_enable(int el2_unused)
+{
+	uint64_t features;
+
+	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+		uint64_t v;
+
+		if (el2_unused) {
+			/*
+			 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+			 * profiling controls to EL2.
+			 *
+			 * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
+			 * state. Accesses to profiling buffer controls at
+			 * Non-secure EL1 are not trapped to EL2.
+			 */
+			v = read_mdcr_el2();
+			v &= ~MDCR_EL2_TPMS;
+			v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+			write_mdcr_el2(v);
+		}
+
+		/*
+		 * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in Non-secure state
+		 * and disabled in secure state. Accesses to SPE registers at
+		 * S-EL1 generate trap exceptions to EL3.
+		 */
+		v = read_mdcr_el3();
+		v |= MDCR_NSPB(MDCR_NSPB_EL1);
+		write_mdcr_el3(v);
+	}
+}
+
+void spe_disable(void)
+{
+	uint64_t features;
+
+	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+		uint64_t v;
+
+		/* Drain buffered data */
+		psb_csync();
+		dsbnsh();
+
+		/* Disable profiling buffer */
+		v = read_pmblimitr_el1();
+		v &= ~(1ULL << 0);
+		write_pmblimitr_el1(v);
+		isb();
+	}
+}
+
+static void *spe_drain_buffers_hook(const void *arg)
+{
+	uint64_t features;
+
+	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+		/* Drain buffered data */
+		psb_csync();
+		dsbnsh();
+	}
+
+	return 0;
+}
+
+SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
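Note: SUBSCRIBE_TO_EVENT registers spe_drain_buffers_hook with TF-A's pubsub framework, replacing the unconditional el1_sysregs_context_save_post_ops() call: the hook now runs only when the cm_entering_secure_world event is published from BL31's context management. A sketch of the publishing side, assuming the pubsub macros from pubsub.h:

#include <pubsub_events.h>

void example_enter_secure_world(void)
{
	/* Runs every registered subscriber, including spe_drain_buffers_hook */
	PUBLISH_EVENT(cm_entering_secure_world);
}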
@@ -149,19 +149,10 @@ V := 0
 # platforms).
 WARMBOOT_ENABLE_DCACHE_EARLY := 0

-# By default, enable Statistical Profiling Extensions.
-# The top level Makefile will disable this feature depending on
-# the target architecture and version number.
+# Build option to enable/disable the Statistical Profiling Extensions
 ENABLE_SPE_FOR_LOWER_ELS := 1

-# SPE is enabled by default but only supported on AArch64 8.2 onwards.
-# Disable it in all other cases.
+# SPE is only supported on AArch64 so disable it on AArch32.
 ifeq (${ARCH},aarch32)
 override ENABLE_SPE_FOR_LOWER_ELS := 0
-else
-ifeq (${ARM_ARCH_MAJOR},8)
-ifeq ($(ARM_ARCH_MINOR),$(filter $(ARM_ARCH_MINOR),0 1))
-ENABLE_SPE_FOR_LOWER_ELS := 0
-endif
-endif
 endif
@@ -14,6 +14,7 @@
 #include <plat_arm.h>
 #include <platform.h>
 #include <psci.h>
+#include <spe.h>
 #include <v2m_def.h>
 #include "drivers/pwrc/fvp_pwrc.h"
 #include "fvp_def.h"
@@ -57,7 +58,7 @@ static void fvp_cluster_pwrdwn_common(void)
 	 * On power down we need to disable statistical profiling extensions
 	 * before exiting coherency.
 	 */
-	arm_disable_spe();
+	spe_disable();
 #endif

 	/* Disable coherency if this cluster is to be turned off */
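Note: the ordering here matters. spe_disable() drains the profiling buffer with ordinary memory writes, so it has to complete while the cluster is still in the coherency domain. A sketch of the constraint, with a hypothetical power-down routine:

#include <spe.h>

static void cluster_powerdown_sketch(void)
{
	/* Drain and disable the profiling buffer first... */
	spe_disable();

	/* ...only then exit coherency (e.g. via
	 * plat_arm_interconnect_exit_coherency()), so the drained
	 * records land in memory while writes are still coherent. */
}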
@@ -12,7 +12,6 @@
 	.globl	plat_crash_console_putc
 	.globl	plat_crash_console_flush
 	.globl	platform_mem_init
-	.globl	arm_disable_spe

 	/* -----------------------------------------------------
@@ -88,34 +87,6 @@ func platform_mem_init
 	ret
 endfunc platform_mem_init

-/* -----------------------------------------------------
- * void arm_disable_spe (void);
- * -----------------------------------------------------
- */
-#if ENABLE_SPE_FOR_LOWER_ELS
-func arm_disable_spe
-	/* Detect if SPE is implemented */
-	mrs	x0, id_aa64dfr0_el1
-	ubfx	x0, x0, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
-	cmp	x0, #0x1
-	b.ne	1f
-
-	/* Drain buffered data */
-	.arch	armv8.2-a+profile
-	psb	csync
-	dsb	nsh
-
-	/* Disable Profiling Buffer */
-	mrs	x0, pmblimitr_el1
-	bic	x0, x0, #1
-	msr	pmblimitr_el1, x0
-	isb
-	.arch	armv8-a
-1:
-	ret
-endfunc arm_disable_spe
-#endif
-
 /*
  * Need to use coherent stack when ARM Cryptocell is used to authenticate images
  * since Cryptocell uses DMA to transfer data and it is not coherent with the