Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
adam.huang
Arm Trusted Firmware
Commits
bcc7ad76
Unverified
Commit
bcc7ad76
authored
Dec 18, 2018
by
Antonio Niño Díaz
Committed by
GitHub
Dec 18, 2018
Browse files
Merge pull request #1722 from antonio-nino-diaz-arm/an/arch
Synchronize architectural headers with TF-A-Tests
parents
08fd68a2
b4474fab
Changes
4
Hide whitespace changes
Inline
Side-by-side
include/lib/aarch32/arch_helpers.h
View file @
bcc7ad76
...
...
@@ -248,6 +248,19 @@ DEFINE_COPROCR_RW_FUNCS(cnthp_ctl_el2, CNTHP_CTL)
/* Read/write accessors for the EL2 physical timer value and compare-value
 * registers. CNTHP_CVAL is 64 bits wide, hence the _64 variant. */
DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) ((x) |= U(1) << CNTP_CTL_ENABLE_SHIFT)
#define set_cntp_ctl_imask(x) ((x) |= U(1) << CNTP_CTL_IMASK_SHIFT)
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
/* Accessors for the GIC CPU-interface System Register Enable registers,
 * one per exception level (ICC_SRE: EL1, ICC_HSRE: EL2, ICC_MSRE: EL3). */
DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
...
...
@@ -378,4 +391,59 @@ static inline unsigned int get_current_el(void)
/* Map the _el0-suffixed Activity Monitor count-enable reads used by common
 * code onto their AArch32 register names. */
#define read_amcntenset0_el0() read_amcntenset0()
#define read_amcntenset1_el0() read_amcntenset1()
/* Helper functions to manipulate CPSR */
static
inline
void
enable_irq
(
void
)
{
/*
* The compiler memory barrier will prevent the compiler from
* scheduling non-volatile memory access after the write to the
* register.
*
* This could happen if some initialization code issues non-volatile
* accesses to an area used by an interrupt handler, in the assumption
* that it is safe as the interrupts are disabled at the time it does
* that (according to program order). However, non-volatile accesses
* are not necessarily in program order relatively with volatile inline
* assembly statements (and volatile accesses).
*/
COMPILER_BARRIER
();
__asm__
volatile
(
"cpsie i"
);
isb
();
}
static
inline
void
enable_serror
(
void
)
{
COMPILER_BARRIER
();
__asm__
volatile
(
"cpsie a"
);
isb
();
}
static
inline
void
enable_fiq
(
void
)
{
COMPILER_BARRIER
();
__asm__
volatile
(
"cpsie f"
);
isb
();
}
static
inline
void
disable_irq
(
void
)
{
COMPILER_BARRIER
();
__asm__
volatile
(
"cpsid i"
);
isb
();
}
static
inline
void
disable_serror
(
void
)
{
COMPILER_BARRIER
();
__asm__
volatile
(
"cpsid a"
);
isb
();
}
static
inline
void
disable_fiq
(
void
)
{
COMPILER_BARRIER
();
__asm__
volatile
(
"cpsid f"
);
isb
();
}
#endif
/* ARCH_HELPERS_H */
include/lib/aarch64/arch.h
View file @
bcc7ad76
...
...
@@ -534,19 +534,6 @@
#define CNTP_CTL_IMASK_MASK U(1)
#define CNTP_CTL_ISTATUS_MASK U(1)
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
/* Exception Syndrome register bits and bobs */
#define ESR_EC_SHIFT U(26)
#define ESR_EC_MASK U(0x3f)
...
...
include/lib/aarch64/arch_helpers.h
View file @
bcc7ad76
...
...
@@ -215,11 +215,81 @@ DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
/* Inner-shareable data memory barrier and instruction synchronization
 * barrier helper functions. */
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
/* Unmask IRQ exceptions by clearing DAIF.I. */
static inline void enable_irq(void)
{
	/*
	 * The compiler memory barrier will prevent the compiler from
	 * scheduling non-volatile memory access after the write to the
	 * register.
	 *
	 * This could happen if some initialization code issues non-volatile
	 * accesses to an area used by an interrupt handler, in the assumption
	 * that it is safe as the interrupts are disabled at the time it does
	 * that (according to program order). However, non-volatile accesses
	 * are not necessarily in program order relatively with volatile inline
	 * assembly statements (and volatile accesses).
	 */
	COMPILER_BARRIER();
	write_daifclr(DAIF_IRQ_BIT);
	isb();
}
/* Unmask FIQ exceptions by clearing DAIF.F.
 * The barrier keeps earlier non-volatile accesses from being reordered
 * past the unmask (see the comment in enable_irq()). */
static inline void enable_fiq(void)
{
	COMPILER_BARRIER();
	write_daifclr(DAIF_FIQ_BIT);
	isb();
}
/* Unmask asynchronous aborts (SError) by clearing DAIF.A.
 * The barrier keeps earlier non-volatile accesses from being reordered
 * past the unmask (see the comment in enable_irq()). */
static inline void enable_serror(void)
{
	COMPILER_BARRIER();
	write_daifclr(DAIF_ABT_BIT);
	isb();
}
/* Unmask debug exceptions by clearing DAIF.D.
 * The barrier keeps earlier non-volatile accesses from being reordered
 * past the unmask (see the comment in enable_irq()). */
static inline void enable_debug_exceptions(void)
{
	COMPILER_BARRIER();
	write_daifclr(DAIF_DBG_BIT);
	isb();
}
/* Mask IRQ exceptions by setting DAIF.I.
 * Barrier + ISB ensure the mask takes effect in program order. */
static inline void disable_irq(void)
{
	COMPILER_BARRIER();
	write_daifset(DAIF_IRQ_BIT);
	isb();
}
/* Mask FIQ exceptions by setting DAIF.F.
 * Barrier + ISB ensure the mask takes effect in program order. */
static inline void disable_fiq(void)
{
	COMPILER_BARRIER();
	write_daifset(DAIF_FIQ_BIT);
	isb();
}
/* Mask asynchronous aborts (SError) by setting DAIF.A.
 * Barrier + ISB ensure the mask takes effect in program order. */
static inline void disable_serror(void)
{
	COMPILER_BARRIER();
	write_daifset(DAIF_ABT_BIT);
	isb();
}
/* Mask debug exceptions by setting DAIF.D.
 * Barrier + ISB ensure the mask takes effect in program order. */
static inline void disable_debug_exceptions(void)
{
	COMPILER_BARRIER();
	write_daifset(DAIF_DBG_BIT);
	isb();
}
#if !ERROR_DEPRECATED
/* Deprecated helpers, implemented in assembly (lib/aarch64/misc_helpers.S);
 * only declared when the build still tolerates deprecated interfaces. */
uint32_t get_afflvl_shift(uint32_t);
uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);

/* Exception return with x0..x7 preserved; marked __dead2 as it does not
 * return to the caller. */
void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
		  uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
#endif
/* Issue an SMC with arguments x0..x7 in the corresponding registers
 * (implemented in assembly). Marked __dead2: control is not expected to
 * return to the caller. */
void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
		 uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
...
...
@@ -306,6 +376,19 @@ DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
/* Physical counter read and EL2 timer control accessors. */
DEFINE_SYSREG_READ_FUNC(cntpct_el0)
DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
#define get_cntp_ctl_enable(x) (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) (((x) >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
#define set_cntp_ctl_imask(x) ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
/* EL3 software thread ID register and EL2 virtual counter offset accessors. */
DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
...
...
lib/aarch64/misc_helpers.S
View file @
bcc7ad76
...
...
@@ -9,9 +9,11 @@
#include <assert_macros.S>
#include <xlat_tables_defs.h>
#if !ERROR_DEPRECATED
	/* Deprecated symbols: exported only while deprecated interfaces
	 * are still allowed by the build. */
	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
#endif /* ERROR_DEPRECATED */
	.globl	smc
	.globl	zero_normalmem
...
...
@@ -30,6 +32,7 @@
.
globl
enable_vfp
#endif
#if !ERROR_DEPRECATED
func
get_afflvl_shift
cmp
x0
,
#
3
cinc
x0
,
x0
,
eq
...
...
@@ -52,7 +55,7 @@ endfunc mpidr_mask_lower_afflvls
/* Deprecated: perform an exception return using the current ELR/SPSR. */
func eret
	eret
endfunc eret
#endif /* ERROR_DEPRECATED */
func
smc
smc
#
0
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment