Arm Trusted Firmware
Commit cf0886e2 (unverified), authored Oct 29, 2018 by Soby Mathew, committed by GitHub on Oct 29, 2018

Merge pull request #1644 from soby-mathew/sm/pie_proto

Position Independent Executable (PIE) Support

Parents: c38941f0, fc922ca8
Changes: 32 files
Makefile

@@ -205,11 +205,6 @@ TF_CFLAGS += $(CPPFLAGS) $(TF_CFLAGS_$(ARCH)) \
 				-Os -ffunction-sections -fdata-sections
 
 GCC_V_OUTPUT		:=	$(shell $(CC) -v 2>&1)
-PIE_FOUND		:=	$(findstring --enable-default-pie,${GCC_V_OUTPUT})
-
-ifneq ($(PIE_FOUND),)
-TF_CFLAGS		+=	-fno-PIE
-endif
 
 # Force the compiler to include the frame pointer
 ifeq (${ENABLE_BACKTRACE},1)

@@ -335,6 +330,16 @@ ifeq (${ARM_ARCH_MAJOR},7)
 include make_helpers/armv7-a-cpus.mk
 endif
 
+ifeq ($(ENABLE_PIE),1)
+TF_CFLAGS		+=	-fpie
+TF_LDFLAGS		+=	-pie
+else
+PIE_FOUND		:=	$(findstring --enable-default-pie,${GCC_V_OUTPUT})
+ifneq ($(PIE_FOUND),)
+TF_CFLAGS		+=	-fno-PIE
+endif
+endif
+
 # Include the CPU specific operations makefile, which provides default
 # values for all CPU errata workarounds and CPU specific optimisations.
 # This can be overridden by the platform.

@@ -565,6 +570,7 @@ $(eval $(call assert_boolean,ENABLE_AMU))
 $(eval $(call assert_boolean,ENABLE_ASSERTIONS))
 $(eval $(call assert_boolean,ENABLE_BACKTRACE))
 $(eval $(call assert_boolean,ENABLE_MPAM_FOR_LOWER_ELS))
+$(eval $(call assert_boolean,ENABLE_PIE))
 $(eval $(call assert_boolean,ENABLE_PMF))
 $(eval $(call assert_boolean,ENABLE_PSCI_STAT))
 $(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))

@@ -615,6 +621,7 @@ $(eval $(call add_define,ENABLE_AMU))
 $(eval $(call add_define,ENABLE_ASSERTIONS))
 $(eval $(call add_define,ENABLE_BACKTRACE))
 $(eval $(call add_define,ENABLE_MPAM_FOR_LOWER_ELS))
+$(eval $(call add_define,ENABLE_PIE))
 $(eval $(call add_define,ENABLE_PMF))
 $(eval $(call add_define,ENABLE_PSCI_STAT))
 $(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
bl2/aarch64/bl2_entrypoint.S

@@ -70,13 +70,19 @@ func bl2_entrypoint
 	 *   - the coherent memory section.
 	 * ---------------------------------------------
 	 */
-	ldr	x0, =__BSS_START__
-	ldr	x1, =__BSS_SIZE__
+	adrp	x0, __BSS_START__
+	add	x0, x0, :lo12:__BSS_START__
+	adrp	x1, __BSS_END__
+	add	x1, x1, :lo12:__BSS_END__
+	sub	x1, x1, x0
 	bl	zeromem
 
 #if USE_COHERENT_MEM
-	ldr	x0, =__COHERENT_RAM_START__
-	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+	adrp	x0, __COHERENT_RAM_START__
+	add	x0, x0, :lo12:__COHERENT_RAM_START__
+	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
+	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
+	sub	x1, x1, x0
 	bl	zeromem
 #endif
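The hunk above drops the linker-computed size symbols in favour of start/end markers addressed with adrp/add (:lo12:), so every reference is PC-relative and the region size is computed at run time. The same pattern expressed as a minimal C sketch for comparison only (clear_bss and the u_register_t stand-in are illustrative; zeromem and the marker symbols come from the diff itself):

/* Minimal stand-ins for TF-A types/prototypes (normally from utils.h). */
typedef unsigned long u_register_t;
void zeromem(void *mem, u_register_t length);

/* Linker-exported markers; only their addresses are meaningful. */
extern char __BSS_START__[];
extern char __BSS_END__[];

/* Clear .bss from start/end markers instead of a linker-computed size, so
 * that, under -fpie, the compiler emits PC-relative adrp/add references. */
static inline void clear_bss(void)
{
	zeromem(__BSS_START__, (u_register_t)(__BSS_END__ - __BSS_START__));
}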
bl31/aarch64/bl31_entrypoint.S

@@ -7,6 +7,7 @@
 #include <arch.h>
 #include <bl_common.h>
 #include <el3_common_macros.S>
+#include <platform_def.h>
 #include <pmf_asm_macros.S>
 #include <runtime_instr.h>
 #include <xlat_mmu_helpers.h>

@@ -73,6 +74,18 @@ func bl31_entrypoint
 	mov	x22, 0
 	mov	x23, 0
 #endif /* RESET_TO_BL31 */
+
+	/* --------------------------------------------------------------------
+	 * If PIE is enabled, fixup the Global descriptor Table and dynamic
+	 * relocations
+	 * --------------------------------------------------------------------
+	 */
+#if ENABLE_PIE
+	mov_imm	x0, BL31_BASE
+	mov_imm	x1, BL31_LIMIT
+	bl	fixup_gdt_reloc
+#endif /* ENABLE_PIE */
+
 	/* ---------------------------------------------
 	 * Perform platform specific early arch. setup
 	 * ---------------------------------------------
bl31/bl31.ld.S

@@ -26,6 +26,8 @@ SECTIONS
     ASSERT(. == ALIGN(PAGE_SIZE),
            "BL31_BASE address is not aligned on a page boundary.")
 
+    __BL31_START__ = .;
+
 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;

@@ -63,6 +65,16 @@ SECTIONS
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;
 
+        /*
+         * Keep the .got section in the RO section as the it is patched
+         * prior to enabling the MMU and having the .got in RO is better for
+         * security.
+         */
+        . = ALIGN(16);
+        __GOT_START__ = .;
+        *(.got)
+        __GOT_END__ = .;
+
         /* Place pubsub sections for events */
         . = ALIGN(8);
 #include <pubsub_events.h>

@@ -153,6 +165,16 @@ SECTIONS
         __DATA_END__ = .;
     } >RAM
 
+    . = ALIGN(16);
+    /*
+     * .rela.dyn needs to come after .data for the read-elf utility to parse
+     * this section correctly.
+     */
+    __RELA_START__ = .;
+    .rela.dyn . : {
+    } >RAM
+    __RELA_END__ = .;
+
 #ifdef BL31_PROGBITS_LIMIT
     ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
 #endif

@@ -265,11 +287,5 @@ SECTIONS
     __RW_END__ = .;
     __BL31_END__ = .;
 
-    __BSS_SIZE__ = SIZEOF(.bss);
-#if USE_COHERENT_MEM
-    __COHERENT_RAM_UNALIGNED_SIZE__ =
-        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif
-
     ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
 }
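The linker script now exports __GOT_START__/__GOT_END__ and __RELA_START__/__RELA_END__ so the runtime fixup code can walk both tables. A minimal C sketch of how such linker-exported markers can be referenced from code (the helper names are hypothetical; the real consumer is the assembly routine fixup_gdt_reloc added in lib/aarch64/misc_helpers.S further below):

#include <stdint.h>
#include <stddef.h>

/* Markers defined by bl31.ld.S; only their addresses matter. */
extern uint64_t __GOT_START__[], __GOT_END__[];
extern char __RELA_START__[], __RELA_END__[];

/* Hypothetical helpers reporting how many entries each table holds. */
static inline size_t got_entry_count(void)
{
	/* The GOT is an array of 64-bit addresses. */
	return (size_t)(__GOT_END__ - __GOT_START__);
}

static inline size_t rela_entry_count(void)
{
	/* Each Elf64_Rela record is 24 bytes (see the comment added in
	 * lib/aarch64/misc_helpers.S). */
	return (size_t)(__RELA_END__ - __RELA_START__) / 24;
}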
docs/user-guide.rst

@@ -371,6 +371,10 @@ Common build options
    partitioning in EL3, however. Platform initialisation code should configure
    and use partitions in EL3 as required. This option defaults to ``0``.
 
+-  ``ENABLE_PIE``: Boolean option to enable Position Independent Executable
+   (PIE) support within generic code in TF-A. This option is currently only
+   supported in BL31. Default is 0.
+
 -  ``ENABLE_PMF``: Boolean option to enable support for optional Performance
    Measurement Framework(PMF). Default is 0.
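Because ENABLE_PIE is also registered through add_define in the Makefile hunk above, it is visible to C and assembly sources as a preprocessor symbol. A minimal sketch of guarding PIE-only logic from generic C code (illustrative only; the hunks in this commit use the guard from assembly, and apply_dynamic_relocations is a hypothetical helper):

/* ENABLE_PIE is normally passed as -DENABLE_PIE=0/1 by the build system
 * (see the add_define hunk in the Makefile); the fallback is only for
 * standalone compilation of this sketch. */
#ifndef ENABLE_PIE
#define ENABLE_PIE 0
#endif

void apply_dynamic_relocations(void);	/* hypothetical helper */

void early_init(void)
{
#if ENABLE_PIE
	/* Only meaningful when the image was built with -fpie / -pie. */
	apply_dynamic_relocations();
#endif
}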
include/common/aarch64/asm_macros.S

@@ -105,8 +105,9 @@
 	 * Clobber: X30, X1, X2
 	 */
 	.macro get_my_mp_stack _name, _size
-	bl  plat_my_core_pos
-	ldr x2, =(\_name + \_size)
+	bl	plat_my_core_pos
+	adrp	x2, (\_name + \_size)
+	add	x2, x2, :lo12:(\_name + \_size)
 	mov x1, #\_size
 	madd x0, x0, x1, x2
 	.endm

@@ -117,7 +118,8 @@
 	 * Out: X0 = physical address of stack base
 	 */
 	.macro get_up_stack _name, _size
-	ldr x0, =(\_name + \_size)
+	adrp	x0, (\_name + \_size)
+	add	x0, x0, :lo12:(\_name + \_size)
 	.endm
 
 /*
include/common/aarch64/el3_common_macros.S

@@ -283,26 +283,38 @@
 		 * an earlier boot loader stage.
 		 * -------------------------------------------------------------
 		 */
-		ldr	x0, =__RW_START__
-		ldr	x1, =__RW_END__
+		adrp	x0, __RW_START__
+		add	x0, x0, :lo12:__RW_START__
+		adrp	x1, __RW_END__
+		add	x1, x1, :lo12:__RW_END__
 		sub	x1, x1, x0
 		bl	inv_dcache_range
 #endif
 
-		ldr	x0, =__BSS_START__
-		ldr	x1, =__BSS_SIZE__
+		adrp	x0, __BSS_START__
+		add	x0, x0, :lo12:__BSS_START__
+		adrp	x1, __BSS_END__
+		add	x1, x1, :lo12:__BSS_END__
+		sub	x1, x1, x0
 		bl	zeromem
 
 #if USE_COHERENT_MEM
-		ldr	x0, =__COHERENT_RAM_START__
-		ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+		adrp	x0, __COHERENT_RAM_START__
+		add	x0, x0, :lo12:__COHERENT_RAM_START__
+		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
+		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
+		sub	x1, x1, x0
 		bl	zeromem
 #endif
 
 #if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_IN_XIP_MEM)
-		ldr	x0, =__DATA_RAM_START__
-		ldr	x1, =__DATA_ROM_START__
-		ldr	x2, =__DATA_SIZE__
+		adrp	x0, __DATA_RAM_START__
+		add	x0, x0, :lo12:__DATA_RAM_START__
+		adrp	x1, __DATA_ROM_START__
+		add	x1, x1, :lo12:__DATA_ROM_START__
+		adrp	x2, __DATA_RAM_END__
+		add	x2, x2, :lo12:__DATA_RAM_END__
+		sub	x2, x2, x0
 		bl	memcpy16
 #endif
 	.endif /* _init_c_runtime */
include/common/bl_common.h

@@ -83,6 +83,7 @@ IMPORT_SYM(unsigned long, __BL2_END__, BL2_END);
 #elif defined(IMAGE_BL2U)
 IMPORT_SYM(unsigned long, __BL2U_END__, BL2U_END);
 #elif defined(IMAGE_BL31)
+IMPORT_SYM(unsigned long, __BL31_START__, BL31_START);
 IMPORT_SYM(unsigned long, __BL31_END__, BL31_END);
 #elif defined(IMAGE_BL32)
 IMPORT_SYM(unsigned long, __BL32_END__, BL32_END);
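IMPORT_SYM exposes a linker-defined symbol to C as a typed constant. With __BL31_START__ now imported alongside __BL31_END__, BL31 code can reason about its own image extent; a minimal sketch, using a simplified stand-in for the macro (the real definition lives in bl_common.h, and bl31_image_size is an illustrative helper, not part of this commit):

/* Simplified stand-in for the IMPORT_SYM macro from bl_common.h: it exposes
 * the *address* of a linker symbol as a constant of the requested type. */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];          \
	static const type name = (type) sym;

IMPORT_SYM(unsigned long, __BL31_START__, BL31_START);
IMPORT_SYM(unsigned long, __BL31_END__, BL31_END);

/* Illustrative helper: size of the BL31 image as laid out by the linker. */
static inline unsigned long bl31_image_size(void)
{
	return BL31_END - BL31_START;
}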
include/lib/cpus/aarch32/cpu_macros.S

@@ -161,10 +161,9 @@
 	.endif
 
 	/*
-	 * Weakly-bound, optional errata status printing function for CPUs of
+	 * Mandatory errata status printing function for CPUs of
 	 * this class.
 	 */
-	.weak \_name\()_errata_report
 	.word \_name\()_errata_report
 
 #ifdef IMAGE_BL32
include/lib/cpus/aarch64/cpu_macros.S

@@ -183,10 +183,9 @@
 	.endif
 
 	/*
-	 * Weakly-bound, optional errata status printing function for CPUs of
+	 * Mandatory errata status printing function for CPUs of
 	 * this class.
 	 */
-	.weak \_name\()_errata_report
 	.quad \_name\()_errata_report
 
 #ifdef IMAGE_BL31
include/lib/pmf/pmf_asm_macros.S

@@ -18,10 +18,12 @@
 	mov	x9, x30
 	bl	plat_my_core_pos
 	mov	x30, x9
-	ldr	x1, =__PERCPU_TIMESTAMP_SIZE__
+	adr	x2, __PMF_PERCPU_TIMESTAMP_END__
+	adr	x1, __PMF_TIMESTAMP_START__
+	sub	x1, x2, x1
 	mov	x2, #(\_tid * PMF_TS_SIZE)
 	madd	x0, x0, x1, x2
-	ldr	x1, =pmf_ts_mem_\_name
+	adr	x1, pmf_ts_mem_\_name
 	add	x0, x0, x1
 	.endm
include/lib/utils.h

@@ -67,6 +67,29 @@ void zero_normalmem(void *mem, u_register_t length);
  * zeroing.
  */
 void zeromem(void *mem, u_register_t length);
 
+/*
+ * Utility function to return the address of a symbol. By default, the
+ * compiler generates adr/adrp instruction pair to return the reference
+ * to the symbol and this utility is used to override this compiler
+ * generated to code to use `ldr` instruction.
+ *
+ * This helps when Position Independent Executable needs to reference a symbol
+ * which is constant and does not depend on the execute address of the binary.
+ */
+#define DEFINE_LOAD_SYM_ADDR(_name)				\
+static inline u_register_t load_addr_## _name(void)		\
+{								\
+	u_register_t v;						\
+	/* Create a void reference to silence compiler */	\
+	(void) _name;						\
+	__asm__ volatile ("ldr %0, =" #_name : "=r" (v));	\
+	return v;						\
+}
+
+/* Helper to invoke the function defined by DEFINE_LOAD_SYM_ADDR() */
+#define LOAD_ADDR_OF(_name)	(typeof(_name) *) load_addr_## _name()
+
 #endif /* !(defined(__LINKER__) || defined(__ASSEMBLY__)) */
 
 #endif /* __UTILS_H__ */
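DEFINE_LOAD_SYM_ADDR forces a literal-pool load (ldr reg, =sym), so the returned value is the link-time address of the symbol rather than the PC-relative, possibly relocated one. A minimal usage sketch (the symbol name and surrounding code are illustrative, not taken from this commit; assumes a TF-A build where utils.h is on the include path):

#include <utils.h>

/* Illustrative global whose link-time address is wanted. */
extern unsigned int shared_flag;

/* Generates load_addr_shared_flag(), which returns the symbol's address via
 * an `ldr reg, =sym` literal load instead of an adrp/add pair. */
DEFINE_LOAD_SYM_ADDR(shared_flag)

void use_link_time_address(void)
{
	/* LOAD_ADDR_OF() casts the returned u_register_t back to the
	 * symbol's pointer type, here `unsigned int *`. */
	unsigned int *flag = LOAD_ADDR_OF(shared_flag);

	*flag = 1U;
}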
lib/aarch64/misc_helpers.S

 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -7,6 +7,7 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <assert_macros.S>
+#include <xlat_tables_defs.h>
 
 	.globl	get_afflvl_shift
 	.globl	mpidr_mask_lower_afflvls

@@ -23,6 +24,8 @@
 	.globl	disable_mmu_icache_el1
 	.globl	disable_mmu_icache_el3
+	.globl	fixup_gdt_reloc
+
 #if SUPPORT_VFP
 	.globl	enable_vfp
 #endif

@@ -497,3 +500,114 @@ func enable_vfp
 	ret
 endfunc enable_vfp
 #endif
+
+/* ---------------------------------------------------------------------------
+ * Helper to fixup Global Descriptor table (GDT) and dynamic relocations
+ * (.rela.dyn) at runtime.
+ *
+ * This function is meant to be used when the firmware is compiled with -fpie
+ * and linked with -pie options. We rely on the linker script exporting
+ * appropriate markers for start and end of the section. For GOT, we
+ * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
+ * __RELA_START__ and __RELA_END__.
+ *
+ * The function takes the limits of the memory to apply fixups to as
+ * arguments (which is usually the limits of the relocable BL image).
+ *   x0 - the start of the fixup region
+ *   x1 - the limit of the fixup region
+ * These addresses have to be page (4KB aligned).
+ * ---------------------------------------------------------------------------
+ */
+func fixup_gdt_reloc
+	mov	x6, x0
+	mov	x7, x1
+
+	/* Test if the limits are 4K aligned */
+#if ENABLE_ASSERTIONS
+	orr	x0, x0, x1
+	tst	x0, #(PAGE_SIZE - 1)
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Calculate the offset based on return address in x30.
+	 * Assume that this funtion is called within a page of the start of
+	 * of fixup region.
+	 */
+	and	x2, x30, #~(PAGE_SIZE - 1)
+	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
+
+	adrp	x1, __GOT_START__
+	add	x1, x1, :lo12:__GOT_START__
+	adrp	x2, __GOT_END__
+	add	x2, x2, :lo12:__GOT_END__
+
+	/*
+	 * GOT is an array of 64_bit addresses which must be fixed up as
+	 * new_addr = old_addr + Diff(S).
+	 * The new_addr is the address currently the binary is executing from
+	 * and old_addr is the address at compile time.
+	 */
+1:
+	ldr	x3, [x1]
+	/* Skip adding offset if address is < lower limit */
+	cmp	x3, x6
+	b.lo	2f
+	/* Skip adding offset if address is >= upper limit */
+	cmp	x3, x7
+	b.ge	2f
+	add	x3, x3, x0
+	str	x3, [x1]
+2:
+	add	x1, x1, #8
+	cmp	x1, x2
+	b.lo	1b
+
+	/* Starting dynamic relocations. Use adrp/adr to get RELA_START and END */
+	adrp	x1, __RELA_START__
+	add	x1, x1, :lo12:__RELA_START__
+	adrp	x2, __RELA_END__
+	add	x2, x2, :lo12:__RELA_END__
+	/*
+	 * According to ELF-64 specification, the RELA data structure is as
+	 * follows:
+	 *	typedef struct
+	 *	{
+	 *		Elf64_Addr r_offset;
+	 *		Elf64_Xword r_info;
+	 *		Elf64_Sxword r_addend;
+	 *	} Elf64_Rela;
+	 *
+	 * r_offset is address of reference
+	 * r_info is symbol index and type of relocation (in this case
+	 * 0x403 which corresponds to R_AARCH64_RELATIV).
+	 * r_addend is constant part of expression.
+	 *
+	 * Size of Elf64_Rela structure is 24 bytes.
+	 */
+1:
+	/* Assert that the relocation type is R_AARCH64_RELATIV */
+#if ENABLE_ASSERTIONS
+	ldr	x3, [x1, #8]
+	cmp	x3, #0x403
+	ASM_ASSERT(eq)
+#endif
+	ldr	x3, [x1]	/* r_offset */
+	add	x3, x0, x3
+	ldr	x4, [x1, #16]	/* r_addend */
+
+	/* Skip adding offset if r_addend is < lower limit */
+	cmp	x4, x6
+	b.lo	2f
+
+	/* Skip adding offset if r_addend entry is >= upper limit */
+	cmp	x4, x7
+	b.ge	2f
+
+	add	x4, x0, x4	/* Diff(S) + r_addend */
+	str	x4, [x3]
+2:
+	add	x1, x1, #24
+	cmp	x1, x2
+	b.lo	1b
+	ret
+endfunc fixup_gdt_reloc
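For readability, the same fixup algorithm expressed as a C sketch (assumptions: the marker symbols come from bl31.ld.S, the struct mirrors the ELF-64 layout quoted in the comment above, 0x403 is the R_AARCH64_RELATIVE value checked by the assembly, and fixup_relocations is an illustrative name; the real implementation is the assembly routine above):

#include <stdint.h>

/* ELF-64 relocation record, as quoted in the comment above (24 bytes). */
typedef struct {
	uint64_t r_offset;	/* address of the reference to patch */
	uint64_t r_info;	/* symbol index and relocation type  */
	int64_t  r_addend;	/* constant part of the expression   */
} Elf64_Rela;

#define R_AARCH64_RELATIVE	0x403U	/* value checked by the assembly */

/* Markers exported by the linker script (bl31.ld.S). */
extern uint64_t __GOT_START__[], __GOT_END__[];
extern Elf64_Rela __RELA_START__[], __RELA_END__[];

/*
 * C sketch of fixup_gdt_reloc(): fixup_start/fixup_limit bound the
 * relocatable image (BL31_BASE/BL31_LIMIT in the entrypoint), and diff is
 * the difference between the current and the compiled load address.
 */
static void fixup_relocations(uint64_t fixup_start, uint64_t fixup_limit,
			      uint64_t diff)
{
	/* Patch every GOT entry that points inside the image. */
	for (uint64_t *got = __GOT_START__; got < __GOT_END__; got++) {
		if (*got >= fixup_start && *got < fixup_limit)
			*got += diff;
	}

	/* Apply the R_AARCH64_RELATIVE entries from .rela.dyn:
	 * *(r_offset + diff) = r_addend + diff, skipping addends that fall
	 * outside the fixup region, exactly as the assembly does. */
	for (Elf64_Rela *r = __RELA_START__; r < __RELA_END__; r++) {
		uint64_t *where = (uint64_t *)(r->r_offset + diff);
		uint64_t value = (uint64_t)r->r_addend;

		if (value >= fixup_start && value < fixup_limit)
			*where = value + diff;
	}
}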
lib/cpus/aarch32/aem_generic.S

@@ -40,6 +40,15 @@ func aem_generic_cluster_pwr_dwn
 	b	dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+	bx	lr
+endfunc aem_generic_errata_report
+#endif
+
 /* cpu_ops for Base AEM FVP */
 declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
 	aem_generic_core_pwr_dwn, \
lib/cpus/aarch32/cortex_a12.S

@@ -69,6 +69,15 @@ func cortex_a12_cluster_pwr_dwn
 	b	cortex_a12_disable_smp
 endfunc cortex_a12_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A12. Must follow AAPCS.
+ */
+func cortex_a12_errata_report
+	bx	lr
+endfunc cortex_a12_errata_report
+#endif
+
 declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
 	cortex_a12_reset_func, \
 	cortex_a12_core_pwr_dwn, \
lib/cpus/aarch32/cortex_a32.S

@@ -117,6 +117,15 @@ func cortex_a32_cluster_pwr_dwn
 	b	cortex_a32_disable_smp
 endfunc cortex_a32_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A32. Must follow AAPCS.
+ */
+func cortex_a32_errata_report
+	bx	lr
+endfunc cortex_a32_errata_report
+#endif
+
 declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
 	cortex_a32_reset_func, \
 	cortex_a32_core_pwr_dwn, \
lib/cpus/aarch32/cortex_a5.S

@@ -69,6 +69,15 @@ func cortex_a5_cluster_pwr_dwn
 	b	cortex_a5_disable_smp
 endfunc cortex_a5_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A5. Must follow AAPCS.
+ */
+func cortex_a5_errata_report
+	bx	lr
+endfunc cortex_a5_errata_report
+#endif
+
 declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
 	cortex_a5_reset_func, \
 	cortex_a5_core_pwr_dwn, \
lib/cpus/aarch32/cortex_a7.S

@@ -69,6 +69,15 @@ func cortex_a7_cluster_pwr_dwn
 	b	cortex_a7_disable_smp
 endfunc cortex_a7_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A7. Must follow AAPCS.
+ */
+func cortex_a7_errata_report
+	bx	lr
+endfunc cortex_a7_errata_report
+#endif
+
 declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
 	cortex_a7_reset_func, \
 	cortex_a7_core_pwr_dwn, \
lib/cpus/aarch64/aem_generic.S

@@ -46,6 +46,15 @@ func aem_generic_cluster_pwr_dwn
 	b	dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+	ret
+endfunc aem_generic_errata_report
+#endif
+
 /* ---------------------------------------------
  * This function provides cpu specific
  * register information for crash reporting.
lib/cpus/aarch64/cortex_a35.S

@@ -114,6 +114,16 @@ func cortex_a35_cluster_pwr_dwn
 	b	cortex_a35_disable_smp
 endfunc cortex_a35_cluster_pwr_dwn
 
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A35. Must follow AAPCS.
+ */
+func cortex_a35_errata_report
+	ret
+endfunc cortex_a35_errata_report
+#endif
+
 /* ---------------------------------------------
  * This function provides cortex_a35 specific
  * register information for crash reporting.