Arm Trusted Firmware, commit 31dfea92 (unverified)
Authored Jan 11, 2018 by davidcunado-arm; committed by GitHub on Jan 11, 2018

Merge pull request #1214 from dp-arm/dp/cve_2017_5715

Workarounds for CVE-2017-5715 on Cortex A57/A72/A73 and A75

Parents: f10c0c45, 780edd86
Changes: 13 files
bl31/aarch64/runtime_exceptions.S
...
...
@@ -14,6 +14,26 @@
	.globl	runtime_exceptions

+	.globl	sync_exception_sp_el0
+	.globl	irq_sp_el0
+	.globl	fiq_sp_el0
+	.globl	serror_sp_el0
+	.globl	sync_exception_sp_elx
+	.globl	irq_sp_elx
+	.globl	fiq_sp_elx
+	.globl	serror_sp_elx
+	.globl	sync_exception_aarch64
+	.globl	irq_aarch64
+	.globl	fiq_aarch64
+	.globl	serror_aarch64
+	.globl	sync_exception_aarch32
+	.globl	irq_aarch32
+	.globl	fiq_aarch32
+	.globl	serror_aarch32
+
	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
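(Annotation, inferred from the rest of this commit: these handler labels are newly exported so that the workaround vector tables added below, workaround_mmu_runtime_exceptions and workaround_bpiall_vbar0_runtime_exceptions, can tail-branch into the common handlers with plain b instructions.)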
...
...
bl31/bl31.mk
...
...
@@ -58,6 +58,11 @@ ifeq (${ENABLE_SVE_FOR_NS},1)
BL31_SOURCES		+=	lib/extensions/sve/sve.c
endif

+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL31_SOURCES		+=	lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S	\
+				lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+endif
+
BL31_LINKERFILE		:=	bl31/bl31.ld.S

# Flag used to indicate if Crash reporting via console should be included
...
...
docs/cpu-specific-build-macros.rst
...
...
@@ -11,6 +11,15 @@ This document describes the various build options present in the CPU specific
operations framework to enable errata workarounds and to enable optimizations
for a specific CPU on a platform.

+Security Vulnerability Workarounds
+----------------------------------
+
+ARM Trusted Firmware exports a series of build flags which control which
+security vulnerability workarounds should be applied at runtime.
+
+-  ``WORKAROUND_CVE_2017_5715``: Enables the security workaround for
+   `CVE-2017-5715`_. Defaults to 1.
+
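A usage sketch (annotation, not text from this commit): since this is an ordinary build option, a platform could opt out of the workaround on the ``make`` command line; the platform name below is a placeholder::

    make PLAT=<platform> WORKAROUND_CVE_2017_5715=0 all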
CPU Errata Workarounds
----------------------
...
...
@@ -142,6 +151,7 @@ architecture that can be enabled by the platform as desired.
*Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.*
+.. _CVE-2017-5715: http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2017-5715
.. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf
.. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf
.. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
...
...
include/common/aarch64/el3_common_macros.S
...
...
@@ -13,7 +13,7 @@
	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
-	.macro el3_arch_init_common _exception_vectors
+	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
...
...
@@ -49,14 +49,6 @@
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

-	/* ---------------------------------------------------------------------
-	 * Set the exception vectors.
-	 * ---------------------------------------------------------------------
-	 */
-	adr	x0, \_exception_vectors
-	msr	vbar_el3, x0
-	isb
-
	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
...
...
@@ -220,6 +212,14 @@
do_cold_boot:
	.endif /* _warm_boot_mailbox */

+	/* ---------------------------------------------------------------------
+	 * Set the exception vectors.
+	 * ---------------------------------------------------------------------
+	 */
+	adr	x0, \_exception_vectors
+	msr	vbar_el3, x0
+	isb
+
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
...
...
@@ -228,7 +228,7 @@
	 */
	bl	reset_handler

-	el3_arch_init_common \_exception_vectors
+	el3_arch_init_common

	.if \_secondary_cold_boot
	/* -------------------------------------------------------------
...
...
include/lib/aarch64/arch.h
...
...
@@ -117,6 +117,9 @@
#define ID_AA64PFR0_SVE_SHIFT	U(32)
#define ID_AA64PFR0_SVE_MASK	U(0xf)
#define ID_AA64PFR0_SVE_LENGTH	U(4)
+#define ID_AA64PFR0_CSV2_SHIFT	U(56)
+#define ID_AA64PFR0_CSV2_MASK	U(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH	U(4)

/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT	U(32)
...
...
@@ -337,6 +340,11 @@
#define SPSR_T_ARM		U(0x0)
#define SPSR_T_THUMB		U(0x1)

+#define SPSR_M_SHIFT		U(4)
+#define SPSR_M_MASK		U(0x1)
+#define SPSR_M_AARCH64		U(0x0)
+#define SPSR_M_AARCH32		U(0x1)
+
#define DISABLE_ALL_EXCEPTIONS \
		(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
...
...
include/lib/el3_runtime/aarch64/context.h
...
...
@@ -46,12 +46,26 @@
#define CTX_GPREG_SP_EL0	U(0xf8)
#define CTX_GPREGS_END		U(0x100)

+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_CVE_2017_5715_QUAD0		U(0x0)
+#define CTX_CVE_2017_5715_QUAD1		U(0x8)
+#define CTX_CVE_2017_5715_QUAD2		U(0x10)
+#define CTX_CVE_2017_5715_QUAD3		U(0x18)
+#define CTX_CVE_2017_5715_QUAD4		U(0x20)
+#define CTX_CVE_2017_5715_QUAD5		U(0x28)
+#define CTX_CVE_2017_5715_END		U(0x30)
+#else
+#define CTX_CVE_2017_5715_OFFSET	CTX_GPREGS_OFFSET
+#define CTX_CVE_2017_5715_END		CTX_GPREGS_END
+#endif
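An illustrative C view (annotation, not code from this commit): the six quad-words reserved above hold the EL3 state that the BPIALL workaround entry macro saves with stp pairs before dropping to S-EL1, as the ordering in workaround_cve_2017_5715_bpiall.S below shows. A hypothetical struct mirroring the offsets:

#include <stdint.h>

/* Hypothetical mirror of the assembler offsets; the real header
 * derives the equivalent type via DEFINE_REG_STRUCT() further down. */
typedef struct {
	uint64_t scr_el3;	/* CTX_CVE_2017_5715_QUAD0 */
	uint64_t spsr_el3;	/* CTX_CVE_2017_5715_QUAD1 */
	uint64_t elr_el3;	/* CTX_CVE_2017_5715_QUAD2 */
	uint64_t sctlr_el1;	/* CTX_CVE_2017_5715_QUAD3 */
	uint64_t esr_el3;	/* CTX_CVE_2017_5715_QUAD4 */
	uint64_t x30;		/* CTX_CVE_2017_5715_QUAD5 */
} cve_2017_5715_regs_sketch_t;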
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'el3_state'
* structure at their correct offsets. Note that some of the registers are only
* 32-bits wide but are stored as 64-bit values for convenience
******************************************************************************/
-#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_EL3STATE_OFFSET	(CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
#define CTX_SCR_EL3 U(0x0)
#define CTX_RUNTIME_SP U(0x8)
#define CTX_SPSR_EL3 U(0x10)
...
...
@@ -186,6 +200,9 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_ALL	(CTX_CVE_2017_5715_END >> DWORD_SHIFT)
+#endif
#define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
#define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
...
...
@@ -201,6 +218,10 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+#if WORKAROUND_CVE_2017_5715
+DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
+#endif
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
...
...
@@ -242,6 +263,9 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
*/
typedef struct cpu_context {
	gp_regs_t gpregs_ctx;
+#if WORKAROUND_CVE_2017_5715
+	cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
+#endif
	el3_state_t el3state_ctx;
	el1_sys_regs_t sysregs_ctx;
#if CTX_INCLUDE_FPREGS
...
...
lib/cpus/aarch64/cortex_a57.S
...
...
@@ -383,6 +383,11 @@ func cortex_a57_reset_func
	bl	errata_a57_859972_wa
#endif

+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	adr	x0, workaround_mmu_runtime_exceptions
+	msr	vbar_el3, x0
+#endif
+
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * ---------------------------------------------
...
...
lib/cpus/aarch64/cortex_a72.S
...
...
@@ -110,6 +110,12 @@ func cortex_a72_reset_func
	mov	x0, x18
	bl	errata_a72_859971_wa
#endif

+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	adr	x0, workaround_mmu_runtime_exceptions
+	msr	vbar_el3, x0
+#endif
+
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * ---------------------------------------------
...
...
lib/cpus/aarch64/cortex_a73.S
...
...
@@ -36,6 +36,11 @@ func cortex_a73_disable_smp
endfunc cortex_a73_disable_smp

func cortex_a73_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	msr	vbar_el3, x0
+#endif
+
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * Clobbers : x0
...
...
lib/cpus/aarch64/cortex_a75.S
...
...
@@ -12,6 +12,21 @@
#include <cortex_a75.h>
func cortex_a75_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	mrs	x0, id_aa64pfr0_el1
+	ubfx	x0, x0, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
+	/*
+	 * If the field equals 1 then branch targets trained in one
+	 * context cannot affect speculative execution in a different context.
+	 */
+	cmp	x0, #1
+	beq	1f
+	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	msr	vbar_el3, x0
+1:
+#endif
+
#if ENABLE_AMU
	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
	mrs	x0, actlr_el3
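For readers who prefer C, a rough equivalent of the CSV2 check above (annotation; a sketch that assumes TF-A's usual sysreg accessors read_id_aa64pfr0_el1() and write_vbar_el3() from arch_helpers.h, and treats the vector table symbol as an external array):

extern uint64_t workaround_bpiall_vbar0_runtime_exceptions[];

static void a75_install_bpiall_vectors_sketch(void)
{
	uint64_t csv2 = (read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT) &
			ID_AA64PFR0_CSV2_MASK;

	/* CSV2 == 1: the hardware already guarantees that branch targets
	 * trained in one context cannot affect speculation in another. */
	if (csv2 != 1U)
		write_vbar_el3((uint64_t)workaround_bpiall_vbar0_runtime_exceptions);
}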
...
...
lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
0 → 100644
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <arch.h>
#include <asm_macros.S>
#include <context.h>
	.globl	workaround_bpiall_vbar0_runtime_exceptions

#define EMIT_BPIALL 0xee070fd5
#define EMIT_MOV_R0_IMM(v) 0xe3a0000##v
#define EMIT_SMC 0xe1600070
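The EMIT_* values are pre-encoded AArch32 instructions, planted as .word literals inside the vector entries below and executed as code at S-EL1. Decoded by hand (my reading of the A32 encodings, not part of the commit), each stub amounts to:

	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL, invalidate all branch predictors (0xee070fd5)
	mov	r0, #<type>		@ tag the original exception type (0xe3a0000<v>)
	smc	#0			@ trap back to EL3 (0xe1600070)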
	.macro	enter_workaround _stub_name
	/* Save GP regs */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	adr	x4, \_stub_name

	/*
	 * Load SPSR_EL3 and VBAR_EL3.  SPSR_EL3 is set up to have
	 * all interrupts masked in preparation to running the workaround
	 * stub in S-EL1.  VBAR_EL3 points to the vector table that
	 * will handle the SMC back from the workaround stub.
	 */
	ldp	x0, x1, [x4, #0]

	/*
	 * Load SCTLR_EL1 and ELR_EL3.  SCTLR_EL1 is configured to disable
	 * the MMU in S-EL1.  ELR_EL3 points to the appropriate stub in S-EL1.
	 */
	ldp	x2, x3, [x4, #16]

	mrs	x4, scr_el3
	mrs	x5, spsr_el3
	mrs	x6, elr_el3
	mrs	x7, sctlr_el1
	mrs	x8, esr_el3

	/* Preserve system registers in the workaround context */
	stp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
	stp	x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
	stp	x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]

	/*
	 * Setting SCR_EL3 to all zeroes means that the NS, RW
	 * and SMD bits are configured as expected.
	 */
	msr	scr_el3, xzr

	/*
	 * Reload system registers with the crafted values
	 * in preparation for entry in S-EL1.
	 */
	msr	spsr_el3, x0
	msr	vbar_el3, x1
	msr	sctlr_el1, x2
	msr	elr_el3, x3

	eret
	.endm
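(Annotation: the two ldp loads above assume each stub context block is exactly four quad-words, at offsets 0, 8, 16 and 24. The aarch32_stub_ctx_* blocks below follow that order: the SPSR_EL3 value, the VBAR_EL3 value pointing at the vbar1 table, the SCTLR_EL1 value, and finally the ELR_EL3 target, i.e. the stub itself.)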
	/* ---------------------------------------------------------------------
	 * This vector table is used at runtime to enter the workaround at
	 * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions.  If the workaround
	 * is not enabled, the existing runtime exception vector table is used.
	 * ---------------------------------------------------------------------
	 */
vector_base workaround_bpiall_vbar0_runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
	b	sync_exception_sp_el0
	/*
	 * Since each vector table entry is 128 bytes, we can store the
	 * stub context in the unused space to minimize memory footprint.
	 */
aarch32_stub_smc:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(1)
	.word	EMIT_SMC
aarch32_stub_ctx_smc:
	/* Mask all interrupts and set AArch32 Supervisor mode */
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
		 MODE32_svc << MODE32_SHIFT)

	/*
	 * VBAR_EL3 points to vbar1 which is the vector table
	 * used while the workaround is executing.
	 */
	.quad	workaround_bpiall_vbar1_runtime_exceptions

	/* Setup SCTLR_EL1 with MMU off and I$ on */
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT

	/* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
	.quad	aarch32_stub_smc
	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
vector_entry workaround_bpiall_vbar0_irq_sp_el0
	b	irq_sp_el0
aarch32_stub_irq:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(2)
	.word	EMIT_SMC
aarch32_stub_ctx_irq:
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
		 MODE32_svc << MODE32_SHIFT)
	.quad	workaround_bpiall_vbar1_runtime_exceptions
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
	.quad	aarch32_stub_irq
	check_vector_size workaround_bpiall_vbar0_irq_sp_el0

vector_entry workaround_bpiall_vbar0_fiq_sp_el0
	b	fiq_sp_el0
aarch32_stub_fiq:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(4)
	.word	EMIT_SMC
aarch32_stub_ctx_fiq:
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
		 MODE32_svc << MODE32_SHIFT)
	.quad	workaround_bpiall_vbar1_runtime_exceptions
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
	.quad	aarch32_stub_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0

vector_entry workaround_bpiall_vbar0_serror_sp_el0
	b	serror_sp_el0
aarch32_stub_serror:
	.word	EMIT_BPIALL
	.word	EMIT_MOV_R0_IMM(8)
	.word	EMIT_SMC
aarch32_stub_ctx_serror:
	.quad	(SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
		 SPSR_M_AARCH32 << SPSR_M_SHIFT | \
		 MODE32_svc << MODE32_SHIFT)
	.quad	workaround_bpiall_vbar1_runtime_exceptions
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
	.quad	aarch32_stub_serror
	check_vector_size workaround_bpiall_vbar0_serror_sp_el0
	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx : 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
	b	sync_exception_sp_elx
	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx

vector_entry workaround_bpiall_vbar0_irq_sp_elx
	b	irq_sp_elx
	check_vector_size workaround_bpiall_vbar0_irq_sp_elx

vector_entry workaround_bpiall_vbar0_fiq_sp_elx
	b	fiq_sp_elx
	check_vector_size workaround_bpiall_vbar0_fiq_sp_elx

vector_entry workaround_bpiall_vbar0_serror_sp_elx
	b	serror_sp_elx
	check_vector_size workaround_bpiall_vbar0_serror_sp_elx
	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
	enter_workaround aarch32_stub_ctx_smc
	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64

vector_entry workaround_bpiall_vbar0_irq_aarch64
	enter_workaround aarch32_stub_ctx_irq
	check_vector_size workaround_bpiall_vbar0_irq_aarch64

vector_entry workaround_bpiall_vbar0_fiq_aarch64
	enter_workaround aarch32_stub_ctx_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_aarch64

vector_entry workaround_bpiall_vbar0_serror_aarch64
	enter_workaround aarch32_stub_ctx_serror
	check_vector_size workaround_bpiall_vbar0_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
	enter_workaround aarch32_stub_ctx_smc
	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32

vector_entry workaround_bpiall_vbar0_irq_aarch32
	enter_workaround aarch32_stub_ctx_irq
	check_vector_size workaround_bpiall_vbar0_irq_aarch32

vector_entry workaround_bpiall_vbar0_fiq_aarch32
	enter_workaround aarch32_stub_ctx_fiq
	check_vector_size workaround_bpiall_vbar0_fiq_aarch32

vector_entry workaround_bpiall_vbar0_serror_aarch32
	enter_workaround aarch32_stub_ctx_serror
	check_vector_size workaround_bpiall_vbar0_serror_aarch32
	/* ---------------------------------------------------------------------
	 * This vector table is used while the workaround is executing.  It
	 * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
	 * workaround stubs to enter EL3 from S-EL1.  It restores the previous
	 * EL3 state before proceeding with the normal runtime exception vector.
	 * ---------------------------------------------------------------------
	 */
vector_base workaround_bpiall_vbar1_runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0

vector_entry workaround_bpiall_vbar1_irq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_irq_sp_el0

vector_entry workaround_bpiall_vbar1_fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_fiq_sp_el0

vector_entry workaround_bpiall_vbar1_serror_sp_el0
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx : 0x200 - 0x400 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx

vector_entry workaround_bpiall_vbar1_irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_irq_sp_elx

vector_entry workaround_bpiall_vbar1_fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_fiq_sp_elx

vector_entry workaround_bpiall_vbar1_serror_sp_elx
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64

vector_entry workaround_bpiall_vbar1_irq_aarch64
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_irq_aarch64

vector_entry workaround_bpiall_vbar1_fiq_aarch64
	b	report_unhandled_interrupt
	check_vector_size workaround_bpiall_vbar1_fiq_aarch64

vector_entry workaround_bpiall_vbar1_serror_aarch64
	b	report_unhandled_exception
	check_vector_size workaround_bpiall_vbar1_serror_aarch64
	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
	/* Restore register state from the workaround context */
	ldp	x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
	ldp	x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
	ldp	x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]

	/* Apply the restored system register state */
	msr	scr_el3, x2
	msr	spsr_el3, x3
	msr	elr_el3, x4
	msr	sctlr_el1, x5
	msr	esr_el3, x6

	/*
	 * Workaround is complete, so swap VBAR_EL3 to point
	 * to workaround entry table in preparation for subsequent
	 * Sync/IRQ/FIQ/SError exceptions.
	 */
	adr	x2, workaround_bpiall_vbar0_runtime_exceptions
	msr	vbar_el3, x2

	/*
	 * Restore all GP regs except x0 and x1.  The value in x0
	 * indicates the type of the original exception.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/*
	 * Each of these handlers will first restore x0 and x1 from
	 * the context and then branch to the common implementation for
	 * each of the exception types.
	 */
	tbnz	x0, #1, workaround_bpiall_vbar1_irq
	tbnz	x0, #2, workaround_bpiall_vbar1_fiq
	tbnz	x0, #3, workaround_bpiall_vbar1_serror

	/* Fallthrough case for Sync exception */
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	sync_exception_aarch64
	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
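(Annotation: the mov r0, #imm in each stub encodes the original exception type as a one-hot value, 1 for Sync, 2 for IRQ, 4 for FIQ and 8 for SError, and that value arrives here in x0 through the SMC. That is why the dispatch above can use tbnz bit tests: bit 1 selects the IRQ path, bit 2 FIQ, bit 3 SError, and the value 1 falls through to the Sync handler.)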
vector_entry workaround_bpiall_vbar1_irq_aarch32
	b	report_unhandled_interrupt
workaround_bpiall_vbar1_irq:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	irq_aarch64
	check_vector_size workaround_bpiall_vbar1_irq_aarch32

vector_entry workaround_bpiall_vbar1_fiq_aarch32
	b	report_unhandled_interrupt
workaround_bpiall_vbar1_fiq:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	fiq_aarch64
	check_vector_size workaround_bpiall_vbar1_fiq_aarch32

vector_entry workaround_bpiall_vbar1_serror_aarch32
	b	report_unhandled_exception
workaround_bpiall_vbar1_serror:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	serror_aarch64
	check_vector_size workaround_bpiall_vbar1_serror_aarch32
lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
0 → 100644
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <arch.h>
#include <asm_macros.S>
#include <context.h>
	.globl	workaround_mmu_runtime_exceptions

vector_base workaround_mmu_runtime_exceptions

	.macro	apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	mrs	x0, sctlr_el3
	/* Disable MMU */
	bic	x1, x0, #SCTLR_M_BIT
	msr	sctlr_el3, x1
	isb
	/* Restore MMU config */
	msr	sctlr_el3, x0
	isb
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm
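(Annotation, consistent with the commit title: this table is installed by the Cortex-A57 and Cortex-A72 reset functions above. On those cores, briefly clearing and then restoring SCTLR_EL3.M on every entry from a lower EL has the side effect of invalidating the branch predictor, which is the actual mitigation; the two isb barriers order the SCTLR_EL3 writes.)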
	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_sp_el0
	b	sync_exception_sp_el0
	check_vector_size workaround_mmu_sync_exception_sp_el0

vector_entry workaround_mmu_irq_sp_el0
	b	irq_sp_el0
	check_vector_size workaround_mmu_irq_sp_el0

vector_entry workaround_mmu_fiq_sp_el0
	b	fiq_sp_el0
	check_vector_size workaround_mmu_fiq_sp_el0

vector_entry workaround_mmu_serror_sp_el0
	b	serror_sp_el0
	check_vector_size workaround_mmu_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx : 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_sp_elx
	b	sync_exception_sp_elx
	check_vector_size workaround_mmu_sync_exception_sp_elx

vector_entry workaround_mmu_irq_sp_elx
	b	irq_sp_elx
	check_vector_size workaround_mmu_irq_sp_elx

vector_entry workaround_mmu_fiq_sp_elx
	b	fiq_sp_elx
	check_vector_size workaround_mmu_fiq_sp_elx

vector_entry workaround_mmu_serror_sp_elx
	b	serror_sp_elx
	check_vector_size workaround_mmu_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
	check_vector_size workaround_mmu_sync_exception_aarch64

vector_entry workaround_mmu_irq_aarch64
	apply_workaround
	b	irq_aarch64
	check_vector_size workaround_mmu_irq_aarch64

vector_entry workaround_mmu_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
	check_vector_size workaround_mmu_fiq_aarch64

vector_entry workaround_mmu_serror_aarch64
	apply_workaround
	b	serror_aarch64
	check_vector_size workaround_mmu_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
	check_vector_size workaround_mmu_sync_exception_aarch32

vector_entry workaround_mmu_irq_aarch32
	apply_workaround
	b	irq_aarch32
	check_vector_size workaround_mmu_irq_aarch32

vector_entry workaround_mmu_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
	check_vector_size workaround_mmu_fiq_aarch32

vector_entry workaround_mmu_serror_aarch32
	apply_workaround
	b	serror_aarch32
	check_vector_size workaround_mmu_serror_aarch32
lib/cpus/cpu-ops.mk
...
...
@@ -16,6 +16,8 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1
# It is enabled by default.
A57_DISABLE_NON_TEMPORAL_HINT	?=1

+WORKAROUND_CVE_2017_5715	?=1
+
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
$(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
...
...
@@ -28,6 +30,9 @@ $(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))

+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
+
# CPU Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
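(Annotation: assert_boolean and add_define are TF-A build helpers; the first errors out unless the flag is 0 or 1, and the second emits it as a C preprocessor define, which is how the #if WORKAROUND_CVE_2017_5715 guards in the sources above see the setting.)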
...
...