Arm Trusted Firmware, commit a513506b (unverified)
Authored by Dimitris Papastamos on May 15, 2018; committed by GitHub on May 15, 2018.

Merge pull request #1373 from jeenu-arm/ras-support

RAS support

Parents: 83cf7a00, 0b9ce906
Changes: 23 files
Makefile
@@ -387,6 +387,20 @@ ifneq (${SMCCC_MAJOR_VERSION},1)
     endif
 endif

+# For RAS_EXTENSION, require that EAs are handled in EL3 first
+ifeq ($(RAS_EXTENSION),1)
+    ifneq ($(HANDLE_EA_EL3_FIRST),1)
+        $(error For RAS_EXTENSION, HANDLE_EA_EL3_FIRST must also be 1)
+    endif
+endif
+
+# When FAULT_INJECTION_SUPPORT is used, require that RAS_EXTENSION is enabled
+ifeq ($(FAULT_INJECTION_SUPPORT),1)
+    ifneq ($(RAS_EXTENSION),1)
+        $(error For FAULT_INJECTION_SUPPORT, RAS_EXTENSION must also be 1)
+    endif
+endif
+
 ################################################################################
 # Process platform overrideable behaviour
 ################################################################################

@@ -514,8 +528,10 @@ $(eval $(call assert_boolean,ENABLE_SPE_FOR_LOWER_ELS))
 $(eval $(call assert_boolean,ENABLE_SPM))
 $(eval $(call assert_boolean,ENABLE_SVE_FOR_NS))
 $(eval $(call assert_boolean,ERROR_DEPRECATED))
+$(eval $(call assert_boolean,FAULT_INJECTION_SUPPORT))
 $(eval $(call assert_boolean,GENERATE_COT))
 $(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
+$(eval $(call assert_boolean,HANDLE_EA_EL3_FIRST))
 $(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
 $(eval $(call assert_boolean,LOAD_IMAGE_V2))
 $(eval $(call assert_boolean,MULTI_CONSOLE_API))

@@ -523,6 +539,7 @@ $(eval $(call assert_boolean,NS_TIMER_SWITCH))
 $(eval $(call assert_boolean,PL011_GENERIC_UART))
 $(eval $(call assert_boolean,PROGRAMMABLE_RESET_ADDRESS))
 $(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
+$(eval $(call assert_boolean,RAS_EXTENSION))
 $(eval $(call assert_boolean,RESET_TO_BL31))
 $(eval $(call assert_boolean,SAVE_KEYS))
 $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))

@@ -561,7 +578,9 @@ $(eval $(call add_define,ENABLE_SPE_FOR_LOWER_ELS))
 $(eval $(call add_define,ENABLE_SPM))
 $(eval $(call add_define,ENABLE_SVE_FOR_NS))
 $(eval $(call add_define,ERROR_DEPRECATED))
+$(eval $(call add_define,FAULT_INJECTION_SUPPORT))
 $(eval $(call add_define,GICV2_G0_FOR_EL3))
+$(eval $(call add_define,HANDLE_EA_EL3_FIRST))
 $(eval $(call add_define,HW_ASSISTED_COHERENCY))
 $(eval $(call add_define,LOAD_IMAGE_V2))
 $(eval $(call add_define,LOG_LEVEL))

@@ -571,6 +590,7 @@ $(eval $(call add_define,PL011_GENERIC_UART))
 $(eval $(call add_define,PLAT_${PLAT}))
 $(eval $(call add_define,PROGRAMMABLE_RESET_ADDRESS))
 $(eval $(call add_define,PSCI_EXTENDED_STATE_ID))
+$(eval $(call add_define,RAS_EXTENSION))
 $(eval $(call add_define,RESET_TO_BL31))
 $(eval $(call add_define,SEPARATE_CODE_AND_RODATA))
 $(eval $(call add_define,SMCCC_MAJOR_VERSION))
...
bl31/aarch64/runtime_exceptions.S
@@ -8,6 +8,7 @@
 #include <asm_macros.S>
 #include <context.h>
 #include <cpu_data.h>
+#include <ea_handle.h>
 #include <interrupt_mgmt.h>
 #include <platform_def.h>
 #include <runtime_svc.h>

@@ -35,17 +36,77 @@
 	.globl	fiq_aarch32
 	.globl	serror_aarch32

+	/*
+	 * Macro that prepares entry to EL3 upon taking an exception.
+	 *
+	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
+	 * ESB instruction. When an error is thus synchronized, the handling is
+	 * delegated to platform EA handler.
+	 *
+	 * Without RAS_EXTENSION, this macro just saves x30, and unmasks
+	 * Asynchronous External Aborts.
+	 */
+	.macro check_and_unmask_ea
+#if RAS_EXTENSION
+	/* Synchronize pending External Aborts */
+	esb
+
+	/* Unmask the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/*
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
+	 */
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+	/* Check for SErrors synchronized by the ESB instruction */
+	mrs	x30, DISR_EL1
+	tbz	x30, #DISR_A_BIT, 1f
+
+	/* Save GP registers and restore them afterwards */
+	bl	save_gp_registers
+	mov	x0, #ERROR_EA_ESB
+	mrs	x1, DISR_EL1
+	bl	delegate_ea
+	bl	restore_gp_registers
+
+1:
+#else
+	/* Unmask the SError interrupt */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+#endif
+	.endm
+
+	/*
+	 * Handle External Abort by delegating to the platform's EA handler.
+	 * Once the platform handler returns, the macro exits EL3 and returns
+	 * to where the abort was taken from.
+	 *
+	 * This macro assumes that x30 is available for use.
+	 *
+	 * 'abort_type' is a constant passed to the platform handler, indicating
+	 * the cause of the External Abort.
+	 */
+	.macro handle_ea abort_type
+	/* Save GP registers */
+	bl	save_gp_registers
+
+	/* Setup exception class and syndrome arguments for platform handler */
+	mov	x0, \abort_type
+	mrs	x1, esr_el3
+	adr	x30, el3_exit
+	b	delegate_ea
+	.endm
+
 	/* ---------------------------------------------------------------------
 	 * This macro handles Synchronous exceptions.
 	 * Only SMC exceptions are supported.
 	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_sync_exception
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
 #if ENABLE_RUNTIME_INSTRUMENTATION
 	/*
 	 * Read the timestamp value and store it in per-cpu data. The value

@@ -69,6 +130,20 @@
 	cmp	x30, #EC_AARCH64_SMC
 	b.eq	smc_handler64

+	/* Check for I/D aborts from lower EL */
+	cmp	x30, #EC_IABORT_LOWER_EL
+	b.eq	1f
+
+	cmp	x30, #EC_DABORT_LOWER_EL
+	b.ne	2f
+
+1:
+	/* Test for EA bit in the instruction syndrome */
+	mrs	x30, esr_el3
+	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
+	handle_ea #ERROR_EA_SYNC
+
+2:
 	/* Other kinds of synchronous exceptions are not handled */
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	b	report_unhandled_exception

@@ -81,12 +156,7 @@
 	 * ---------------------------------------------------------------------
 	 */
 	.macro	handle_interrupt_exception	label
-	/* Enable the SError interrupt */
-	msr	daifclr, #DAIF_ABT_BIT
-
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
 	bl	save_gp_registers
 	/* Save the EL3 system registers needed to return from this exception */
 	mrs	x0, spsr_el3
 	mrs	x1, elr_el3

@@ -154,25 +224,6 @@ interrupt_exit_\label:
 	.endm

-	.macro save_x4_to_x29_sp_el0
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	mrs	x18, sp_el0
-	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
-	.endm
-
 	vector_base	runtime_exceptions

 	/* ---------------------------------------------------------------------

@@ -239,23 +290,29 @@ vector_entry sync_exception_aarch64
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	check_and_unmask_ea
 	handle_sync_exception
 	check_vector_size sync_exception_aarch64

 vector_entry irq_aarch64
+	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch64
 	check_vector_size irq_aarch64

 vector_entry fiq_aarch64
+	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch64
 	check_vector_size fiq_aarch64

 vector_entry serror_aarch64
+	msr	daifclr, #DAIF_ABT_BIT
+
 	/*
-	 * SError exceptions from lower ELs are not currently supported.
-	 * Report their occurrence.
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
 	 */
-	b	report_unhandled_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+	handle_ea #ERROR_EA_ASYNC
 	check_vector_size serror_aarch64

 	/* ---------------------------------------------------------------------

@@ -269,23 +326,29 @@ vector_entry sync_exception_aarch32
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	check_and_unmask_ea
 	handle_sync_exception
 	check_vector_size sync_exception_aarch32

 vector_entry irq_aarch32
+	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch32
 	check_vector_size irq_aarch32

 vector_entry fiq_aarch32
+	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch32
 	check_vector_size fiq_aarch32

 vector_entry serror_aarch32
+	msr	daifclr, #DAIF_ABT_BIT
+
 	/*
-	 * SError exceptions from lower ELs are not currently supported.
-	 * Report their occurrence.
+	 * Explicitly save x30 so as to free up a register and to enable
+	 * branching
 	 */
-	b	report_unhandled_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+	handle_ea #ERROR_EA_ASYNC
 	check_vector_size serror_aarch32

@@ -345,7 +408,21 @@ smc_handler64:
 	 *
 	 * Save x4-x29 and sp_el0.
 	 */
-	save_x4_to_x29_sp_el0
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
 	mov	x5, xzr
 	mov	x6, sp

@@ -431,14 +508,12 @@ compat_or_vendor:
 smc_unknown:
 	/*
-	 * Here we restore x4-x18 regardless of where we came from. AArch32
-	 * callers will find the registers contents unchanged, but AArch64
-	 * callers will find the registers modified (with stale earlier NS
-	 * content). Either way, we aren't leaking any secure information
-	 * through them.
+	 * Unknown SMC call. Populate return value with SMC_UNK, restore
+	 * GP registers, and return to caller.
 	 */
 	mov	x0, #SMC_UNK
-	b	restore_gp_registers_callee_eret
+	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	b	restore_gp_registers_eret

 smc_prohibited:
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

@@ -450,3 +525,62 @@ rt_svc_fw_critical_error:
 	msr	spsel, #1
 	no_ret	report_unhandled_exception
 endfunc smc_handler
+
+/*
+ * Delegate External Abort handling to platform's EA handler. This function
+ * assumes that all GP registers have been saved by the caller.
+ *
+ * x0: EA reason
+ * x1: EA syndrome
+ */
+func delegate_ea
+	/* Save EL3 state */
+	mrs	x2, spsr_el3
+	mrs	x3, elr_el3
+	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+	/*
+	 * Save ESR as handling might involve lower ELs, and returning back to
+	 * EL3 from there would trample the original ESR.
+	 */
+	mrs	x4, scr_el3
+	mrs	x5, esr_el3
+	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+	/*
+	 * Setup rest of arguments, and call platform External Abort handler.
+	 *
+	 * x0: EA reason (already in place)
+	 * x1: Exception syndrome (already in place).
+	 * x2: Cookie (unused for now).
+	 * x3: Context pointer.
+	 * x4: Flags (security state from SCR for now).
+	 */
+	mov	x2, xzr
+	mov	x3, sp
+	ubfx	x4, x4, #0, #1
+
+	/* Switch to runtime stack */
+	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+	msr	spsel, #0
+	mov	sp, x5
+
+	mov	x29, x30
+	bl	plat_ea_handler
+	mov	x30, x29
+
+	/* Make SP point to context */
+	msr	spsel, #1
+
+	/* Restore EL3 state */
+	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+	msr	spsr_el3, x1
+	msr	elr_el3, x2
+
+	/* Restore ESR_EL3 and SCR_EL3 */
+	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+	msr	scr_el3, x3
+	msr	esr_el3, x4
+
+	ret
+endfunc delegate_ea
docs/user-guide.rst
@@ -390,6 +390,14 @@ Common build options
    handled at EL3, and a panic will result. This is supported only for AArch64
    builds.

+-  ``FAULT_INJECTION_SUPPORT``: ARMv8.4 extensions introduced support for fault
+   injection from lower ELs, and this build option enables lower ELs to use
+   Error Records accessed via System Registers to inject faults. This is
+   applicable only to AArch64 builds.
+
+   This feature is intended for testing purposes only, and it is advisable to
+   keep it disabled for production images.
+
 -  ``FIP_NAME``: This is an optional build option which specifies the FIP
    filename for the ``fip`` target. Default is ``fip.bin``.

@@ -531,6 +539,15 @@ Common build options
    smc function id. When this option is enabled on Arm platforms, the
    option ``ARM_RECOM_STATE_ID_ENC`` needs to be set to 1 as well.

+-  ``RAS_EXTENSION``: When set to ``1``, enable Armv8.2 RAS features. RAS
+   features are an optional extension for pre-Armv8.2 CPUs, but are mandatory
+   for Armv8.2 or later CPUs.
+
+   When ``RAS_EXTENSION`` is set to ``1``, ``HANDLE_EA_EL3_FIRST`` must also be
+   set to ``1``.
+
+   This option is disabled by default.
+
 -  ``RESET_TO_BL31``: Enable BL31 entrypoint as the CPU reset vector instead
    of the BL1 entrypoint. It can take the value 0 (CPU reset to BL1
    entrypoint) or 1 (CPU reset to BL31 entrypoint).
include/bl31/ea_handle.h
(new file, mode 100644)
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __EA_HANDLE_H__
#define __EA_HANDLE_H__
/* Constants indicating the reason for an External Abort */
/* External Abort received at SError vector */
#define ERROR_EA_ASYNC 0
/* Synchronous External Abort received at Synchronous exception vector */
#define ERROR_EA_SYNC 1
/* External Abort synchronized by ESB instruction */
#define ERROR_EA_ESB 2
/* RAS event signalled as peripheral interrupt */
#define ERROR_INTERRUPT 3
#endif /* __EA_HANDLE_H__ */
include/common/aarch64/asm_macros.S
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -192,4 +192,10 @@
 	.space	SPINLOCK_ASM_SIZE
 	.endm

+#if RAS_EXTENSION
+	.macro esb
+	.inst	0xd503221f
+	.endm
+#endif
+
 #endif /* __ASM_MACROS_S__ */
include/lib/aarch64/arch.h
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -215,6 +215,7 @@
 /* SCR definitions */
 #define SCR_RES1_BITS		((U(1) << 4) | (U(1) << 5))
+#define SCR_FIEN_BIT		(U(1) << 21)
 #define SCR_TWE_BIT		(U(1) << 13)
 #define SCR_TWI_BIT		(U(1) << 12)
 #define SCR_ST_BIT		(U(1) << 11)

@@ -528,6 +529,12 @@
 #define EC_AARCH64_FP		U(0x2c)
 #define EC_SERROR		U(0x2f)

+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT	U(9)
+
 #define EC_BITS(x)		(((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)

 /* Reset bit inside the Reset management register for EL3 (RMR_EL3) */

@@ -705,4 +712,23 @@
 #define AMCGCR_EL0_CG1NC_LENGTH	U(8)
 #define AMCGCR_EL0_CG1NC_MASK	U(0xff)

+/*******************************************************************************
+ * RAS system registers
+ *******************************************************************************/
+#define DISR_EL1		S3_0_C12_C1_1
+#define DISR_A_BIT		31
+
+#define ERRIDR_EL1		S3_0_C5_C3_0
+#define ERRIDR_MASK		0xffff
+
+#define ERRSELR_EL1		S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1		S3_0_C5_C4_0
+#define ERXCTLR_EL1		S3_0_C5_C4_1
+#define ERXSTATUS_EL1		S3_0_C5_C4_2
+#define ERXADDR_EL1		S3_0_C5_C4_3
+#define ERXMISC0_EL1		S3_0_C5_C4_4
+#define ERXMISC1_EL1		S3_0_C5_C4_5
+
 #endif /* __ARCH_H__ */
include/lib/aarch64/arch_helpers.h
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -333,6 +333,16 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)

+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
 #define IS_IN_EL(x) \
 	(GET_EL(read_CurrentEl()) == MODE_EL##x)
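The accessor definitions above generate helpers such as read_erridr_el1(), write_errselr_el1() and read_erxstatus_el1(). The following is an illustrative sketch only (the function name is hypothetical, not part of this commit) of how those generated accessors are combined to walk the system-register error records; ser_probe_sysreg() in lib/extensions/ras/std_err_record.c, added later in this change, follows the same pattern.

/* Sketch: iterate system-register error records using the new accessors */
static void check_sysreg_records(void)
{
	unsigned int i;
	unsigned int num_records = read_erridr_el1() & ERRIDR_MASK;

	for (i = 0; i < num_records; i++) {
		/* Select error record i, then read its status */
		write_errselr_el1(i);
		isb();

		if (ERR_STATUS_GET_FIELD(read_erxstatus_el1(), V) != 0U) {
			/* Record i holds a valid error; handle it here */
		}
	}
}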
include/lib/el3_runtime/aarch64/context.h
@@ -7,6 +7,8 @@
 #ifndef __CONTEXT_H__
 #define __CONTEXT_H__

+#include <utils_def.h>
+
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the 'gp_regs'
  * structure at their correct offsets.

@@ -53,10 +55,12 @@
  ******************************************************************************/
 #define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
 #define CTX_SCR_EL3		U(0x0)
-#define CTX_RUNTIME_SP		U(0x8)
-#define CTX_SPSR_EL3		U(0x10)
-#define CTX_ELR_EL3		U(0x18)
-#define CTX_EL3STATE_END	U(0x20)
+#define CTX_ESR_EL3		U(0x8)
+#define CTX_RUNTIME_SP		U(0x10)
+#define CTX_SPSR_EL3		U(0x18)
+#define CTX_ELR_EL3		U(0x20)
+#define CTX_UNUSED		U(0x28)
+#define CTX_EL3STATE_END	U(0x30)

 /*******************************************************************************
  * Constants that allow assembler code to access members of and the
include/lib/extensions/ras.h
(new file, mode 100644)

/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __RAS_COMMON__
#define __RAS_COMMON__

#define ERR_HANDLER_VERSION	1

/* Error record access mechanism */
#define ERR_ACCESS_SYSREG	0
#define ERR_ACCESS_MEMMAP	1

/*
 * Register all error records on the platform.
 *
 * This macro must be used in the same file as the array of error record info
 * is declared. Only then would ARRAY_SIZE() yield a meaningful value.
 */
#define REGISTER_ERR_RECORD_INFO(_records) \
	const struct err_record_mapping err_record_mapping = { \
		.err_records = _records, \
		.num_err_records = ARRAY_SIZE(_records), \
	}

/* Error record info iterator */
#define for_each_err_record_info(_i, _info) \
	for (_i = 0, _info = err_record_mapping.err_records; \
		_i < err_record_mapping.num_err_records; \
		_i++, _info++)

#define _ERR_RECORD_COMMON(_probe, _handler, _aux) \
	.probe = _probe, \
	.handler = _handler, \
	.aux_data = _aux,

#define ERR_RECORD_SYSREG_V1(_idx_start, _num_idx, _probe, _handler, _aux) \
	{ \
		.version = 1, \
		.sysreg.idx_start = _idx_start, \
		.sysreg.num_idx = _num_idx, \
		.access = ERR_ACCESS_SYSREG, \
		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
	}

#define ERR_RECORD_MEMMAP_V1(_base_addr, _size_num_k, _probe, _handler, _aux) \
	{ \
		.version = 1, \
		.memmap.base_addr = _base_addr, \
		.memmap.size_num_k = _size_num_k, \
		.access = ERR_ACCESS_MEMMAP, \
		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
	}

/*
 * Macro to be used to name and declare an array of RAS interrupts along with
 * their handlers.
 *
 * This macro must be used in the same file as the array of interrupts is
 * declared. Only then would ARRAY_SIZE() yield a meaningful value. Also, the
 * array is expected to be sorted in the increasing order of interrupt number.
 */
#define REGISTER_RAS_INTERRUPTS(_array) \
	const struct ras_interrupt_mapping ras_interrupt_mapping = { \
		.intrs = _array, \
		.num_intrs = ARRAY_SIZE(_array), \
	}

#ifndef __ASSEMBLY__

#include <assert.h>
#include <ras_arch.h>

struct err_record_info;

struct ras_interrupt {
	/* Interrupt number, and the associated error record info */
	unsigned int intr_number;
	struct err_record_info *err_record;
	void *cookie;
};

/* Function to probe an error record group for errors */
typedef int (*err_record_probe_t)(const struct err_record_info *info,
		int *probe_data);

/* Data passed to error record group handler */
struct err_handler_data {
	/* Info passed on from top-level exception handler */
	uint64_t flags;
	void *cookie;
	void *handle;

	/* Data structure version */
	unsigned int version;

	/* Reason for EA: one of the ERROR_* constants */
	unsigned int ea_reason;

	/*
	 * For EAs received at vector, the value read from ESR; for an EA
	 * synchronized by ESB, the value of DISR.
	 */
	uint32_t syndrome;

	/* For errors signalled via interrupt, the raw interrupt ID; otherwise, 0. */
	unsigned int interrupt;
};

/* Function to handle errors from an error record group */
typedef int (*err_record_handler_t)(const struct err_record_info *info,
		int probe_data, const struct err_handler_data *const data);

/* Error record information */
struct err_record_info {
	/* Function to probe error record group for errors */
	err_record_probe_t probe;

	/* Function to handle error record group errors */
	err_record_handler_t handler;

	/* Opaque group-specific data */
	void *aux_data;

	/* Additional information for Standard Error Records */
	union {
		struct {
			/*
			 * For a group accessed via memory-mapped registers,
			 * base address of the page hosting error records, and
			 * the size of the record group.
			 */
			uintptr_t base_addr;

			/* Size of group in number of KBs */
			unsigned int size_num_k;
		} memmap;

		struct {
			/*
			 * For error records accessed via system registers,
			 * index of the error record.
			 */
			unsigned int idx_start;
			unsigned int num_idx;
		} sysreg;
	};

	/* Data structure version */
	unsigned int version;

	/* Error record access mechanism */
	unsigned int access:1;
};

struct err_record_mapping {
	struct err_record_info *err_records;
	size_t num_err_records;
};

struct ras_interrupt_mapping {
	struct ras_interrupt *intrs;
	size_t num_intrs;
};

extern const struct err_record_mapping err_record_mapping;
extern const struct ras_interrupt_mapping ras_interrupt_mapping;

/*
 * Helper functions to probe memory-mapped and system registers implemented in
 * Standard Error Record format
 */
static inline int ras_err_ser_probe_memmap(const struct err_record_info *info,
		int *probe_data)
{
	assert(info->version == ERR_HANDLER_VERSION);

	return ser_probe_memmap(info->memmap.base_addr, info->memmap.size_num_k,
		probe_data);
}

static inline int ras_err_ser_probe_sysreg(const struct err_record_info *info,
		int *probe_data)
{
	assert(info->version == ERR_HANDLER_VERSION);

	return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx,
		probe_data);
}

int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags);
void ras_init(void);

#endif /* __ASSEMBLY__ */

#endif /* __RAS_COMMON__ */
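To make the registration macros above concrete, here is a hedged sketch of how a platform could declare and register one system-register error record group using this header. The record indices, handler body and all names prefixed with plat_ are hypothetical and not part of this patch; ras_err_ser_probe_sysreg() is the probe helper defined above.

/* Hypothetical handler for a group of standard error records */
static int plat_xyz_handler(const struct err_record_info *info, int probe_data,
		const struct err_handler_data *const data)
{
	/* Inspect and clear the record selected by probe_data, then report it */
	return 0;
}

/* One group: error records 0..1, accessed via system registers */
struct err_record_info plat_err_records[] = {
	ERR_RECORD_SYSREG_V1(0, 2, ras_err_ser_probe_sysreg, plat_xyz_handler,
			NULL),
};

REGISTER_ERR_RECORD_INFO(plat_err_records);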
include/lib/extensions/ras_arch.h
(new file, mode 100644)
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __RAS_H__
#define __RAS_H__
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <context.h>
#include <mmio.h>
#include <stdint.h>
/*
* Size of nodes implementing Standard Error Records - currently only 4k is
* supported.
*/
#define STD_ERR_NODE_SIZE_NUM_K 4
/*
* Individual register offsets within an error record in Standard Error Record
* format when error records are accessed through memory-mapped registers.
*/
#define ERR_FR(n) (0x0 + (64 * (n)))
#define ERR_CTLR(n) (0x8 + (64 * (n)))
#define ERR_STATUS(n) (0x10 + (64 * (n)))
#define ERR_ADDR(n) (0x18 + (64 * (n)))
#define ERR_MISC0(n) (0x20 + (64 * (n)))
#define ERR_MISC1(n) (0x28 + (64 * (n)))
/* Group Status Register (ERR_STATUS) offset */
#define ERR_GSR(base, size_num_k, n) \
((base) + (0x380 * (size_num_k)) + (8 * (n)))
/* Management register offsets */
#define ERR_DEVID(base, size_num_k) \
((base) + ((0x400 * (size_num_k)) - 0x100) + 0xc8)
#define ERR_DEVID_MASK 0xffff
/* Standard Error Record status register fields */
#define ERR_STATUS_AV_SHIFT 31
#define ERR_STATUS_AV_MASK U(0x1)
#define ERR_STATUS_V_SHIFT 30
#define ERR_STATUS_V_MASK U(0x1)
#define ERR_STATUS_UE_SHIFT 29
#define ERR_STATUS_UE_MASK U(0x1)
#define ERR_STATUS_ER_SHIFT 28
#define ERR_STATUS_ER_MASK U(0x1)
#define ERR_STATUS_OF_SHIFT 27
#define ERR_STATUS_OF_MASK U(0x1)
#define ERR_STATUS_MV_SHIFT 26
#define ERR_STATUS_MV_MASK U(0x1)
#define ERR_STATUS_CE_SHIFT 24
#define ERR_STATUS_CE_MASK U(0x3)
#define ERR_STATUS_DE_SHIFT 23
#define ERR_STATUS_DE_MASK U(0x1)
#define ERR_STATUS_PN_SHIFT 22
#define ERR_STATUS_PN_MASK U(0x1)
#define ERR_STATUS_UET_SHIFT 20
#define ERR_STATUS_UET_MASK U(0x3)
#define ERR_STATUS_IERR_SHIFT 8
#define ERR_STATUS_IERR_MASK U(0xff)
#define ERR_STATUS_SERR_SHIFT 0
#define ERR_STATUS_SERR_MASK U(0xff)
#define ERR_STATUS_GET_FIELD(_status, _field) \
(((_status) >> ERR_STATUS_ ##_field ##_SHIFT) & ERR_STATUS_ ##_field ##_MASK)
#define ERR_STATUS_CLR_FIELD(_status, _field) \
(_status) &= ~(ERR_STATUS_ ##_field ##_MASK << ERR_STATUS_ ##_field ##_SHIFT)
#define ERR_STATUS_SET_FIELD(_status, _field, _value) \
(_status) |= (((_value) & ERR_STATUS_ ##_field ##_MASK) << ERR_STATUS_ ##_field ##_SHIFT)
#define ERR_STATUS_WRITE_FIELD(_status, _field, _value) do { \
		ERR_STATUS_CLR_FIELD(_status, _field); \
		ERR_STATUS_SET_FIELD(_status, _field, _value); \
	} while (0)
/* Standard Error Record control register fields */
#define ERR_CTLR_WDUI_SHIFT 11
#define ERR_CTLR_WDUI_MASK 0x1
#define ERR_CTLR_RDUI_SHIFT 10
#define ERR_CTLR_RDUI_MASK 0x1
#define ERR_CTLR_DUI_SHIFT ERR_CTLR_RDUI_SHIFT
#define ERR_CTLR_DUI_MASK ERR_CTLR_RDUI_MASK
#define ERR_CTLR_WCFI_SHIFT 9
#define ERR_CTLR_WCFI_MASK 0x1
#define ERR_CTLR_RCFI_SHIFT 8
#define ERR_CTLR_RCFI_MASK 0x1
#define ERR_CTLR_CFI_SHIFT ERR_CTLR_RCFI_SHIFT
#define ERR_CTLR_CFI_MASK ERR_CTLR_RCFI_MASK
#define ERR_CTLR_WUE_SHIFT 7
#define ERR_CTLR_WUE_MASK 0x1
#define ERR_CTLR_WFI_SHIFT 6
#define ERR_CTLR_WFI_MASK 0x1
#define ERR_CTLR_WUI_SHIFT 5
#define ERR_CTLR_WUI_MASK 0x1
#define ERR_CTLR_RUE_SHIFT 4
#define ERR_CTLR_RUE_MASK 0x1
#define ERR_CTLR_UE_SHIFT ERR_CTLR_RUE_SHIFT
#define ERR_CTLR_UE_MASK ERR_CTLR_RUE_MASK
#define ERR_CTLR_RFI_SHIFT 3
#define ERR_CTLR_RFI_MASK 0x1
#define ERR_CTLR_FI_SHIFT ERR_CTLR_RFI_SHIFT
#define ERR_CTLR_FI_MASK ERR_CTLR_RFI_MASK
#define ERR_CTLR_RUI_SHIFT 2
#define ERR_CTLR_RUI_MASK 0x1
#define ERR_CTLR_UI_SHIFT ERR_CTLR_RUI_SHIFT
#define ERR_CTLR_UI_MASK ERR_CTLR_RUI_MASK
#define ERR_CTLR_ED_SHIFT 0
#define ERR_CTLR_ED_MASK 0x1
#define ERR_CTLR_CLR_FIELD(_ctlr, _field) \
	(_ctlr) &= ~(ERR_CTLR_ ##_field ##_MASK << ERR_CTLR_ ##_field ##_SHIFT)
#define ERR_CTLR_SET_FIELD(_ctlr, _field, _value) \
(_ctlr) |= (((_value) & ERR_CTLR_ ##_field ##_MASK) << ERR_CTLR_ ##_field ##_SHIFT)
#define ERR_CTLR_ENABLE_FIELD(_ctlr, _field) \
ERR_CTLR_SET_FIELD(_ctlr, _field, ERR_CTLR_ ##_field ##_MASK)
/* Uncorrected error types */
#define ERROR_STATUS_UET_UC	0x0	/* Uncontainable */
#define ERROR_STATUS_UET_UEU	0x1	/* Unrecoverable */
#define ERROR_STATUS_UET_UEO	0x2	/* Restartable */
#define ERROR_STATUS_UET_UER	0x3	/* Recoverable */
/*
 * Standard Error Record accessors for memory-mapped registers.
 */
static inline uint64_t ser_get_feature(uintptr_t base, unsigned int idx)
{
	return mmio_read_64(base + ERR_FR(idx));
}

static inline uint64_t ser_get_control(uintptr_t base, unsigned int idx)
{
	return mmio_read_64(base + ERR_CTLR(idx));
}

static inline uint64_t ser_get_status(uintptr_t base, unsigned int idx)
{
	return mmio_read_64(base + ERR_STATUS(idx));
}

/*
 * Error handling agent would write to the status register to clear an
 * identified/handled error. Most fields in the status register are
 * conditional write-one-to-clear.
 *
 * Typically, to clear the status, it suffices to write back the same value
 * previously read. However, if there were new, higher-priority errors recorded
 * on the node since status was last read, writing the read value won't clear
 * the status. Therefore, an error handling agent must wait on and verify the
 * status has indeed been cleared.
 */
static inline void ser_set_status(uintptr_t base, unsigned int idx,
		uint64_t status)
{
	mmio_write_64(base + ERR_STATUS(idx), status);
}

static inline uint64_t ser_get_addr(uintptr_t base, unsigned int idx)
{
	return mmio_read_64(base + ERR_ADDR(idx));
}

static inline uint64_t ser_get_misc0(uintptr_t base, unsigned int idx)
{
	return mmio_read_64(base + ERR_MISC0(idx));
}

static inline uint64_t ser_get_misc1(uintptr_t base, unsigned int idx)
{
	return mmio_read_64(base + ERR_MISC1(idx));
}

/*
 * Standard Error Record helpers for System registers.
 */
static inline void ser_sys_select_record(unsigned int idx)
{
	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;

	assert(idx < max_idx);

	write_errselr_el1(idx);
	isb();
}

/* Library functions to probe Standard Error Record */
int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data);
int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data);

#endif /* __RAS_H__ */
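A hedged sketch of how a record-group handler might combine the memory-mapped accessors with the ERR_STATUS field helpers defined above; the function name, base address and record index are placeholders, not part of this commit.

/* Sketch: read, decode and clear one memory-mapped Standard Error Record */
static void handle_one_record(uintptr_t base, unsigned int idx)
{
	uint64_t status = ser_get_status(base, idx);

	/* Nothing to do if the record does not hold a valid error */
	if (ERR_STATUS_GET_FIELD(status, V) == 0U)
		return;

	if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {
		/* Uncorrected error: escalate as the platform sees fit */
	}

	/*
	 * Write back the value that was read; most status fields are
	 * write-one-to-clear, so this clears what was just observed.
	 */
	ser_set_status(base, idx, status);
}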
include/lib/utils_def.h
@@ -67,6 +67,13 @@
 #define check_uptr_overflow(ptr, inc)		\
 	(((ptr) > UINTPTR_MAX - (inc)) ? 1 : 0)

+/*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(u32, inc) \
+	((u32) > (UINT32_MAX - (inc)) ? 1 : 0)
+
 /*
  * For those constants to be shared between C and other sources, apply a 'u'
  * or 'ull' suffix to the argument only in C, to avoid undefined or unintended
include/plat/arm/common/arm_def.h
@@ -483,6 +483,7 @@
 #define PLAT_PERCPU_BAKERY_LOCK_SIZE	(1 * CACHE_WRITEBACK_GRANULE)

 /* Priority levels for ARM platforms */
+#define PLAT_RAS_PRI			0x10
 #define PLAT_SDEI_CRITICAL_PRI		0x60
 #define PLAT_SDEI_NORMAL_PRI		0x70
include/plat/common/platform.h
@@ -124,6 +124,9 @@ int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode);
 void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
 #endif

+void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+		void *handle, uint64_t flags);
+
 /*
  * The following function is mandatory when the
  * firmware update feature is used.
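The plat_ea_handler() hook declared above is what the new delegate_ea routine in BL31 branches to. One plausible shape for it on a RAS-enabled platform is to forward to the RAS framework added by this change and panic if nothing claimed the error; this is a sketch only, the commit does not prescribe the body, and the include set and error message are assumptions.

#include <arch_helpers.h>
#include <debug.h>
#include <ras.h>

/* Sketch: forward External Aborts to the RAS framework, panic if unhandled */
void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
	if (ras_ea_handler(ea_reason, syndrome, cookie, handle, flags) != 0)
		return;

	ERROR("Unhandled External Abort received on 0x%lx\n", read_mpidr_el1());
	panic();
}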
lib/el3_runtime/aarch64/context.S
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -15,8 +15,8 @@
 	.global	fpregs_context_restore
 #endif
 	.global	save_gp_registers
+	.global	restore_gp_registers
 	.global	restore_gp_registers_eret
-	.global	restore_gp_registers_callee_eret
 	.global	el3_exit

 /* -----------------------------------------------------

@@ -332,30 +332,50 @@ func save_gp_registers
 	ret
 endfunc save_gp_registers

-func restore_gp_registers_eret
+/*
+ * This function restores all general purpose registers except x30 from the
+ * CPU context. x30 register must be explicitly restored by the caller.
+ */
+func restore_gp_registers
 	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	b	restore_gp_registers_callee_eret
-endfunc restore_gp_registers_eret
-
-func restore_gp_registers_callee_eret
 	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
 	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
 	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
 	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
 	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
 	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
 	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
 	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
 	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
 	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
 	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	msr	sp_el0, x28
 	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	msr	sp_el0, x17
-	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	ret
+endfunc restore_gp_registers
+
+/*
+ * Restore general purpose registers (including x30), and exit EL3 via. ERET to
+ * a lower exception level.
+ */
+func restore_gp_registers_eret
+	bl	restore_gp_registers
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+#if IMAGE_BL31 && RAS_EXTENSION
+	/*
+	 * Issue Error Synchronization Barrier to synchronize SErrors before
+	 * exiting EL3. We're running with EAs unmasked, so any synchronized
+	 * errors would be taken immediately; therefore no need to inspect
+	 * DISR_EL1 register.
+	 */
+	esb
+#endif
 	eret
-endfunc restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret

 /* -----------------------------------------------------
  * This routine assumes that the SP_EL3 is pointing to
lib/el3_runtime/aarch64/context_mgmt.c
@@ -114,6 +114,11 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
 	scr_el3 &= ~SCR_EA_BIT;
 #endif

+#if FAULT_INJECTION_SUPPORT
+	/* Enable fault injection from lower ELs */
+	scr_el3 |= SCR_FIEN_BIT;
+#endif
+
 #ifdef IMAGE_BL31
 	/*
 	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ rounting as
lib/extensions/ras/ras_common.c
(new file, mode 100644)

/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <debug.h>
#include <ea_handle.h>
#include <ehf.h>
#include <platform.h>
#include <ras.h>
#include <ras_arch.h>

#ifndef PLAT_RAS_PRI
# error Platform must define RAS priority value
#endif

/* Handler that receives External Aborts on RAS-capable systems */
int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
	unsigned int i, n_handled = 0, ret;
	int probe_data;
	struct err_record_info *info;

	const struct err_handler_data err_data = {
		.version = ERR_HANDLER_VERSION,
		.ea_reason = ea_reason,
		.interrupt = 0,
		.syndrome = syndrome,
		.flags = flags,
		.cookie = cookie,
		.handle = handle
	};

	for_each_err_record_info(i, info) {
		assert(info->probe != NULL);
		assert(info->handler != NULL);

		/* Continue probing until the record group signals no error */
		while (1) {
			if (info->probe(info, &probe_data) == 0)
				break;

			/* Handle error */
			ret = info->handler(info, probe_data, &err_data);
			if (ret != 0)
				return ret;

			n_handled++;
		}
	}

	return (n_handled != 0);
}

#if ENABLE_ASSERTIONS
static void assert_interrupts_sorted(void)
{
	unsigned int i, last;
	struct ras_interrupt *start = ras_interrupt_mapping.intrs;

	if (ras_interrupt_mapping.num_intrs == 0)
		return;

	last = start[0].intr_number;
	for (i = 1; i < ras_interrupt_mapping.num_intrs; i++) {
		assert(start[i].intr_number > last);
		last = start[i].intr_number;
	}
}
#endif

/*
 * Given an RAS interrupt number, locate the registered handler and call it. If
 * no handler was found for the interrupt number, this function panics.
 */
static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
		void *handle, void *cookie)
{
	struct ras_interrupt *ras_inrs = ras_interrupt_mapping.intrs;
	struct ras_interrupt *selected = NULL;
	int start, end, mid, probe_data, ret __unused;

	const struct err_handler_data err_data = {
		.version = ERR_HANDLER_VERSION,
		.interrupt = intr_raw,
		.flags = flags,
		.cookie = cookie,
		.handle = handle
	};

	assert(ras_interrupt_mapping.num_intrs > 0);

	start = 0;
	end = ras_interrupt_mapping.num_intrs;
	while (start <= end) {
		mid = ((end + start) / 2);
		if (intr_raw == ras_inrs[mid].intr_number) {
			selected = &ras_inrs[mid];
			break;
		} else if (intr_raw < ras_inrs[mid].intr_number) {
			/* Move left */
			end = mid - 1;
		} else {
			/* Move right */
			start = mid + 1;
		}
	}

	if (selected == NULL) {
		ERROR("RAS interrupt %u has no handler!\n", intr_raw);
		panic();
	}

	ret = selected->err_record->probe(selected->err_record, &probe_data);
	assert(ret != 0);

	/* Call error handler for the record group */
	assert(selected->err_record->handler != NULL);
	selected->err_record->handler(selected->err_record, probe_data,
			&err_data);

	return 0;
}

void ras_init(void)
{
#if ENABLE_ASSERTIONS
	/* Check RAS interrupts are sorted */
	assert_interrupts_sorted();
#endif

	/* Register RAS priority handler */
	ehf_register_priority_handler(PLAT_RAS_PRI, ras_interrupt_handler);
}
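ras_init() above registers ras_interrupt_handler with the EL3 exception handling framework at PLAT_RAS_PRI. The commit does not show its call site, but a platform enabling RAS_EXTENSION would presumably invoke it once during BL31 initialisation; the following is a sketch only, and the setup function name is made up.

#include <ras.h>

/* Sketch: possible call site during a platform's BL31 setup */
void plat_bl31_ras_setup(void)
{
#if RAS_EXTENSION
	ras_init();
#endif
}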
lib/extensions/ras/std_err_record.c
(new file, mode 100644)

/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <ras_arch.h>
#include <utils_def.h>

/*
 * Probe for error in memory-mapped registers containing error records
 * implemented in Standard Error Record format. Upon detecting an error, set
 * probe data to the index of the record in error, and return 1; otherwise,
 * return 0.
 */
int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
{
	int num_records, num_group_regs, i;
	uint64_t gsr;

	assert(base != 0);

	/* Only 4K supported for now */
	assert(size_num_k == STD_ERR_NODE_SIZE_NUM_K);

	num_records = (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);

	/* A group register shows error status for 2^6 error records */
	num_group_regs = (num_records >> 6) + 1;

	/* Iterate through group registers to find a record in error */
	for (i = 0; i < num_group_regs; i++) {
		gsr = mmio_read_64(ERR_GSR(base, size_num_k, i));
		if (gsr == 0)
			continue;

		/* Return the index of the record in error */
		if (probe_data != NULL)
			*probe_data = ((i << 6) + __builtin_ctz(gsr));

		return 1;
	}

	return 0;
}

/*
 * Probe for error in System Registers where error records are implemented in
 * Standard Error Record format. Upon detecting an error, set probe data to the
 * index of the record in error, and return 1; otherwise, return 0.
 */
int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data)
{
	int i;
	uint64_t status;
	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;

	assert(idx_start < max_idx);
	assert(check_u32_overflow(idx_start, num_idx) == 0);
	assert((idx_start + num_idx - 1) < max_idx);

	for (i = 0; i < num_idx; i++) {
		/* Select the error record */
		ser_sys_select_record(idx_start + i);

		/* Retrieve status register from the error record */
		status = read_erxstatus_el1();

		/* Check for valid field in status */
		if (ERR_STATUS_GET_FIELD(status, V)) {
			if (probe_data != NULL)
				*probe_data = i;

			return 1;
		}
	}

	return 0;
}
make_helpers/defaults.mk
@@ -76,6 +76,9 @@ EL3_EXCEPTION_HANDLING := 0
 # Build flag to treat usage of deprecated platform and framework APIs as error.
 ERROR_DEPRECATED		:= 0

+# Fault injection support
+FAULT_INJECTION_SUPPORT		:= 0
+
 # Byte alignment that each component in FIP is aligned to
 FIP_ALIGN			:= 0

@@ -92,6 +95,10 @@ GENERATE_COT := 0
 # default, they are for Secure EL1.
 GICV2_G0_FOR_EL3		:= 0

+# Route External Aborts to EL3. Disabled by default; External Aborts are handled
+# by lower ELs.
+HANDLE_EA_EL3_FIRST		:= 0
+
 # Whether system coherency is managed in hardware, without explicit software
 # operations.
 HW_ASSISTED_COHERENCY		:= 0

@@ -120,6 +127,9 @@ PROGRAMMABLE_RESET_ADDRESS := 0
 # Original format.
 PSCI_EXTENDED_STATE_ID		:= 0

+# Enable RAS support
+RAS_EXTENSION			:= 0
+
 # By default, BL1 acts as the reset handler, not BL31
 RESET_TO_BL31			:= 0
plat/arm/common/aarch64/arm_ehf.c
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -11,6 +11,11 @@
  * Enumeration of priority levels on ARM platforms.
  */
 ehf_pri_desc_t arm_exceptions[] = {
+#if RAS_EXTENSION
+	/* RAS Priority */
+	EHF_PRI_DESC(ARM_PRI_BITS, PLAT_RAS_PRI),
+#endif
+
 #if SDEI_SUPPORT
 	/* Critical priority SDEI */
 	EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
plat/arm/common/aarch64/arm_ras.c
(new file, mode 100644)

/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <ras.h>

struct ras_interrupt arm_ras_interrupts[] = {
};

struct err_record_info arm_err_records[] = {
};

REGISTER_ERR_RECORD_INFO(arm_err_records);
REGISTER_RAS_INTERRUPTS(arm_ras_interrupts);
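The Arm platform arrays above are intentionally empty placeholders. For illustration only, a populated version could tie a memory-mapped record group to a RAS error interrupt, keeping the interrupt array sorted as REGISTER_RAS_INTERRUPTS requires; the base address, size, interrupt number and handler below are all made up and not part of this commit.

/* Hypothetical: one memory-mapped record group and its error interrupt */
static int plat_dmc_err_handler(const struct err_record_info *info,
		int probe_data, const struct err_handler_data *const data)
{
	/* Inspect and clear the record indicated by probe_data */
	return 0;
}

static struct err_record_info plat_dmc_err_records[] = {
	ERR_RECORD_MEMMAP_V1(0x2a4c0000UL, 4, ras_err_ser_probe_memmap,
			plat_dmc_err_handler, NULL),
};

static struct ras_interrupt plat_dmc_ras_interrupts[] = {
	/* Must remain sorted by intr_number */
	{ .intr_number = 35, .err_record = &plat_dmc_err_records[0] },
};

A platform using such arrays would pass them to REGISTER_ERR_RECORD_INFO() and REGISTER_RAS_INTERRUPTS() in place of the empty arm_err_records and arm_ras_interrupts shown above.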