From b4474fabe850cb75eeb895a3d7eff6b68e6ab072 Mon Sep 17 00:00:00 2001
From: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Date: Fri, 23 Nov 2018 15:04:01 +0000
Subject: [PATCH] Import exception helpers from TF-A-Tests

This is done to keep the files in both repositories in sync.

Change-Id: Ie1a9f321cbcfe8d7d14f206883fa718872271218
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
---
 include/lib/aarch32/arch_helpers.h | 55 ++++++++++++++++++++++++
 include/lib/aarch64/arch_helpers.h | 68 ++++++++++++++++++++++++++++++
 2 files changed, 123 insertions(+)

diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 30e0584db..a6fe14fb8 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -391,4 +391,59 @@ static inline unsigned int get_current_el(void)
 #define read_amcntenset0_el0()	read_amcntenset0()
 #define read_amcntenset1_el0()	read_amcntenset1()
 
+/* Helper functions to manipulate CPSR */
+static inline void enable_irq(void)
+{
+	/*
+	 * The compiler memory barrier prevents the compiler from
+	 * scheduling non-volatile memory accesses after the write to the
+	 * register.
+	 *
+	 * Such reordering could happen if initialization code issues
+	 * non-volatile accesses to an area used by an interrupt handler, on
+	 * the assumption that this is safe because interrupts are disabled
+	 * at that point (according to program order). However, non-volatile
+	 * accesses are not necessarily kept in program order relative to
+	 * volatile inline assembly statements (and volatile accesses).
+	 */
+	COMPILER_BARRIER();
+	__asm__ volatile ("cpsie	i");
+	isb();
+}
+
+static inline void enable_serror(void)
+{
+	COMPILER_BARRIER();
+	__asm__ volatile ("cpsie	a");
+	isb();
+}
+
+static inline void enable_fiq(void)
+{
+	COMPILER_BARRIER();
+	__asm__ volatile ("cpsie	f");
+	isb();
+}
+
+static inline void disable_irq(void)
+{
+	COMPILER_BARRIER();
+	__asm__ volatile ("cpsid	i");
+	isb();
+}
+
+static inline void disable_serror(void)
+{
+	COMPILER_BARRIER();
+	__asm__ volatile ("cpsid	a");
+	isb();
+}
+
+static inline void disable_fiq(void)
+{
+	COMPILER_BARRIER();
+	__asm__ volatile ("cpsid	f");
+	isb();
+}
+
 #endif /* ARCH_HELPERS_H */
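
The barrier comment in enable_irq() above is worth illustrating. Below is a
minimal sketch of the hazard it guards against; the names init_shared_data
and shared_flag are hypothetical and not part of this patch:

	static int shared_flag;	/* also read by an IRQ handler */

	void init_shared_data(void)
	{
		/* Non-volatile store, before enable_irq() in program order. */
		shared_flag = 1;

		/*
		 * Without COMPILER_BARRIER() inside enable_irq(), the
		 * compiler would be free to sink the store to shared_flag
		 * past the volatile asm that unmasks IRQs, because it does
		 * not order non-volatile accesses against volatile asm
		 * statements. An IRQ taken immediately after unmasking could
		 * then observe shared_flag still equal to 0.
		 */
		enable_irq();
	}
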
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 92b16fc8d..7222b9dc7 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -215,6 +215,74 @@ DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
 DEFINE_SYSOP_FUNC(isb)
 
+static inline void enable_irq(void)
+{
+	/*
+	 * The compiler memory barrier prevents the compiler from
+	 * scheduling non-volatile memory accesses after the write to the
+	 * register.
+	 *
+	 * Such reordering could happen if initialization code issues
+	 * non-volatile accesses to an area used by an interrupt handler, on
+	 * the assumption that this is safe because interrupts are disabled
+	 * at that point (according to program order). However, non-volatile
+	 * accesses are not necessarily kept in program order relative to
+	 * volatile inline assembly statements (and volatile accesses).
+	 */
+	COMPILER_BARRIER();
+	write_daifclr(DAIF_IRQ_BIT);
+	isb();
+}
+
+static inline void enable_fiq(void)
+{
+	COMPILER_BARRIER();
+	write_daifclr(DAIF_FIQ_BIT);
+	isb();
+}
+
+static inline void enable_serror(void)
+{
+	COMPILER_BARRIER();
+	write_daifclr(DAIF_ABT_BIT);
+	isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+	COMPILER_BARRIER();
+	write_daifclr(DAIF_DBG_BIT);
+	isb();
+}
+
+static inline void disable_irq(void)
+{
+	COMPILER_BARRIER();
+	write_daifset(DAIF_IRQ_BIT);
+	isb();
+}
+
+static inline void disable_fiq(void)
+{
+	COMPILER_BARRIER();
+	write_daifset(DAIF_FIQ_BIT);
+	isb();
+}
+
+static inline void disable_serror(void)
+{
+	COMPILER_BARRIER();
+	write_daifset(DAIF_ABT_BIT);
+	isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+	COMPILER_BARRIER();
+	write_daifset(DAIF_DBG_BIT);
+	isb();
+}
+
 #if !ERROR_DEPRECATED
 uint32_t get_afflvl_shift(uint32_t);
 uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
-- 
GitLab
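
For reference, these helpers are typically used to bracket short critical
sections on the current CPU. A minimal usage sketch, assuming the TF-A
include path; the names bump_event_count and event_count are hypothetical:

	#include <arch_helpers.h>

	static unsigned int event_count;	/* shared with an IRQ handler */

	void bump_event_count(void)
	{
		/* Mask IRQs; the isb() in the helper makes it take effect. */
		disable_irq();

		event_count++;	/* no IRQ handler can preempt us on this CPU */

		/* Unmask IRQs; pending interrupts are taken after this. */
		enable_irq();
	}
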