Unverified commit cf0886e2, authored by Soby Mathew, committed by GitHub

Merge pull request #1644 from soby-mathew/sm/pie_proto

Position Independent Executable (PIE) Support

parents c38941f0 fc922ca8
@@ -205,11 +205,6 @@ TF_CFLAGS += $(CPPFLAGS) $(TF_CFLAGS_$(ARCH)) \
     -Os -ffunction-sections -fdata-sections

 GCC_V_OUTPUT := $(shell $(CC) -v 2>&1)

-PIE_FOUND := $(findstring --enable-default-pie,${GCC_V_OUTPUT})
-ifneq ($(PIE_FOUND),)
-TF_CFLAGS += -fno-PIE
-endif

 # Force the compiler to include the frame pointer
 ifeq (${ENABLE_BACKTRACE},1)
@@ -335,6 +330,16 @@ ifeq (${ARM_ARCH_MAJOR},7)
 include make_helpers/armv7-a-cpus.mk
 endif

+ifeq ($(ENABLE_PIE),1)
+TF_CFLAGS += -fpie
+TF_LDFLAGS += -pie
+else
+PIE_FOUND := $(findstring --enable-default-pie,${GCC_V_OUTPUT})
+ifneq ($(PIE_FOUND),)
+TF_CFLAGS += -fno-PIE
+endif
+endif

 # Include the CPU specific operations makefile, which provides default
 # values for all CPU errata workarounds and CPU specific optimisations.
 # This can be overridden by the platform.
@@ -565,6 +570,7 @@ $(eval $(call assert_boolean,ENABLE_AMU))
 $(eval $(call assert_boolean,ENABLE_ASSERTIONS))
 $(eval $(call assert_boolean,ENABLE_BACKTRACE))
 $(eval $(call assert_boolean,ENABLE_MPAM_FOR_LOWER_ELS))
+$(eval $(call assert_boolean,ENABLE_PIE))
 $(eval $(call assert_boolean,ENABLE_PMF))
 $(eval $(call assert_boolean,ENABLE_PSCI_STAT))
 $(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
@@ -615,6 +621,7 @@ $(eval $(call add_define,ENABLE_AMU))
 $(eval $(call add_define,ENABLE_ASSERTIONS))
 $(eval $(call add_define,ENABLE_BACKTRACE))
 $(eval $(call add_define,ENABLE_MPAM_FOR_LOWER_ELS))
+$(eval $(call add_define,ENABLE_PIE))
 $(eval $(call add_define,ENABLE_PMF))
 $(eval $(call add_define,ENABLE_PSCI_STAT))
 $(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
...
@@ -70,13 +70,19 @@ func bl2_entrypoint
     *   - the coherent memory section.
     * ---------------------------------------------
     */
-    ldr x0, =__BSS_START__
-    ldr x1, =__BSS_SIZE__
+    adrp x0, __BSS_START__
+    add x0, x0, :lo12:__BSS_START__
+    adrp x1, __BSS_END__
+    add x1, x1, :lo12:__BSS_END__
+    sub x1, x1, x0
     bl zeromem

 #if USE_COHERENT_MEM
-    ldr x0, =__COHERENT_RAM_START__
-    ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+    adrp x0, __COHERENT_RAM_START__
+    add x0, x0, :lo12:__COHERENT_RAM_START__
+    adrp x1, __COHERENT_RAM_END_UNALIGNED__
+    add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
+    sub x1, x1, x0
     bl zeromem
 #endif
...
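The substitution above is the core idiom of this patch: `ldr reg, =sym` loads the symbol's absolute link-time address from a literal pool, which is wrong once the image executes from a different address, while the `adrp`/`add :lo12:` pair computes the address PC-relatively and is therefore correct wherever the image runs, even before relocations are processed. Sizes are likewise recomputed from start/end marker symbols instead of absolute `__BSS_SIZE__`-style symbols, so no literal-pool load is needed at all. A minimal C sketch of that size computation, assuming the linker-script markers shown in this hunk:

    #include <stddef.h>

    /* Linker-script markers; their addresses move with the image. */
    extern char __BSS_START__[], __BSS_END__[];

    static inline size_t bss_size(void)
    {
        /*
         * The difference of two in-image addresses is position
         * independent, unlike an absolute __BSS_SIZE__ constant
         * read from a literal pool before relocations are applied.
         */
        return (size_t)(__BSS_END__ - __BSS_START__);
    }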
@@ -7,6 +7,7 @@
 #include <arch.h>
 #include <bl_common.h>
 #include <el3_common_macros.S>
+#include <platform_def.h>
 #include <pmf_asm_macros.S>
 #include <runtime_instr.h>
 #include <xlat_mmu_helpers.h>
@@ -73,6 +74,18 @@ func bl31_entrypoint
     mov x22, 0
     mov x23, 0
 #endif /* RESET_TO_BL31 */

+    /* --------------------------------------------------------------------
+     * If PIE is enabled, fix up the Global Offset Table and the dynamic
+     * relocations.
+     * --------------------------------------------------------------------
+     */
+#if ENABLE_PIE
+    mov_imm x0, BL31_BASE
+    mov_imm x1, BL31_LIMIT
+    bl fixup_gdt_reloc
+#endif /* ENABLE_PIE */

     /* ---------------------------------------------
      * Perform platform specific early arch. setup
      * ---------------------------------------------
...
@@ -26,6 +26,8 @@ SECTIONS
     ASSERT(. == ALIGN(PAGE_SIZE),
            "BL31_BASE address is not aligned on a page boundary.")

+    __BL31_START__ = .;

 #if SEPARATE_CODE_AND_RODATA
     .text . : {
         __TEXT_START__ = .;
@@ -63,6 +65,16 @@ SECTIONS
         KEEP(*(cpu_ops))
         __CPU_OPS_END__ = .;

+        /*
+         * Keep the .got section in the RO section as it is patched
+         * prior to enabling the MMU, and having it in RO is better for
+         * security.
+         */
+        . = ALIGN(16);
+        __GOT_START__ = .;
+        *(.got)
+        __GOT_END__ = .;

         /* Place pubsub sections for events */
         . = ALIGN(8);
 #include <pubsub_events.h>
@@ -153,6 +165,16 @@ SECTIONS
         __DATA_END__ = .;
     } >RAM

+    . = ALIGN(16);
+    /*
+     * .rela.dyn needs to come after .data for the readelf utility to
+     * parse this section correctly.
+     */
+    __RELA_START__ = .;
+    .rela.dyn . : {
+    } >RAM
+    __RELA_END__ = .;

 #ifdef BL31_PROGBITS_LIMIT
     ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
 #endif
@@ -265,11 +287,5 @@ SECTIONS
     __RW_END__ = .;
     __BL31_END__ = .;

-    __BSS_SIZE__ = SIZEOF(.bss);
-#if USE_COHERENT_MEM
-    __COHERENT_RAM_UNALIGNED_SIZE__ =
-        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-#endif

     ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
 }
@@ -371,6 +371,10 @@ Common build options
   partitioning in EL3, however. Platform initialisation code should configure
   and use partitions in EL3 as required. This option defaults to ``0``.

+- ``ENABLE_PIE``: Boolean option to enable Position Independent Executable
+  (PIE) support within generic code in TF-A. This option is currently only
+  supported in BL31. Default is 0.

 - ``ENABLE_PMF``: Boolean option to enable support for optional Performance
   Measurement Framework (PMF). Default is 0.
...
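Like the other boolean options in this list, ``ENABLE_PIE`` is supplied on the make command line. A typical invocation might look like the following (the platform name is only illustrative):

    make PLAT=fvp ENABLE_PIE=1 all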
@@ -106,7 +106,8 @@
     */
    .macro get_my_mp_stack _name, _size
    bl plat_my_core_pos
-   ldr x2, =(\_name + \_size)
+   adrp x2, (\_name + \_size)
+   add x2, x2, :lo12:(\_name + \_size)
    mov x1, #\_size
    madd x0, x0, x1, x2
    .endm
@@ -117,7 +118,8 @@
     * Out: X0 = physical address of stack base
     */
    .macro get_up_stack _name, _size
-   ldr x0, =(\_name + \_size)
+   adrp x0, (\_name + \_size)
+   add x0, x0, :lo12:(\_name + \_size)
    .endm

    /*
...
@@ -283,26 +283,38 @@
     * an earlier boot loader stage.
     * -------------------------------------------------------------
     */
-    ldr x0, =__RW_START__
-    ldr x1, =__RW_END__
+    adrp x0, __RW_START__
+    add x0, x0, :lo12:__RW_START__
+    adrp x1, __RW_END__
+    add x1, x1, :lo12:__RW_END__
     sub x1, x1, x0
     bl inv_dcache_range
 #endif

-    ldr x0, =__BSS_START__
-    ldr x1, =__BSS_SIZE__
+    adrp x0, __BSS_START__
+    add x0, x0, :lo12:__BSS_START__
+    adrp x1, __BSS_END__
+    add x1, x1, :lo12:__BSS_END__
+    sub x1, x1, x0
     bl zeromem

 #if USE_COHERENT_MEM
-    ldr x0, =__COHERENT_RAM_START__
-    ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+    adrp x0, __COHERENT_RAM_START__
+    add x0, x0, :lo12:__COHERENT_RAM_START__
+    adrp x1, __COHERENT_RAM_END_UNALIGNED__
+    add x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
+    sub x1, x1, x0
     bl zeromem
 #endif

 #if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_IN_XIP_MEM)
-    ldr x0, =__DATA_RAM_START__
-    ldr x1, =__DATA_ROM_START__
-    ldr x2, =__DATA_SIZE__
+    adrp x0, __DATA_RAM_START__
+    add x0, x0, :lo12:__DATA_RAM_START__
+    adrp x1, __DATA_ROM_START__
+    add x1, x1, :lo12:__DATA_ROM_START__
+    adrp x2, __DATA_RAM_END__
+    add x2, x2, :lo12:__DATA_RAM_END__
+    sub x2, x2, x0
     bl memcpy16
 #endif
    .endif /* _init_c_runtime */
...
@@ -83,6 +83,7 @@ IMPORT_SYM(unsigned long, __BL2_END__, BL2_END);
 #elif defined(IMAGE_BL2U)
 IMPORT_SYM(unsigned long, __BL2U_END__, BL2U_END);
 #elif defined(IMAGE_BL31)
+IMPORT_SYM(unsigned long, __BL31_START__, BL31_START);
 IMPORT_SYM(unsigned long, __BL31_END__, BL31_END);
 #elif defined(IMAGE_BL32)
 IMPORT_SYM(unsigned long, __BL32_END__, BL32_END);
...
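IMPORT_SYM is TF-A's helper for exposing a linker-script symbol to C code as a typed constant. A simplified sketch of the pattern (the exact macro lives in TF-A's headers and may differ in detail):

    /*
     * A linker-defined symbol has an address but no storage, so it is
     * declared as an incomplete char array and its address is cast to
     * the requested type.
     */
    #define IMPORT_SYM(type, sym, name) \
        extern char sym[];              \
        static const type name = (type) sym;

    /* Usage mirroring the hunk above. */
    IMPORT_SYM(unsigned long, __BL31_START__, BL31_START);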
@@ -161,10 +161,9 @@
    .endif

    /*
-    * Weakly-bound, optional errata status printing function for CPUs of
+    * Mandatory errata status printing function for CPUs of
     * this class.
     */
-   .weak \_name\()_errata_report
    .word \_name\()_errata_report

 #ifdef IMAGE_BL32
...
@@ -183,10 +183,9 @@
    .endif

    /*
-    * Weakly-bound, optional errata status printing function for CPUs of
+    * Mandatory errata status printing function for CPUs of
     * this class.
     */
-   .weak \_name\()_errata_report
    .quad \_name\()_errata_report

 #ifdef IMAGE_BL31
...
@@ -18,10 +18,12 @@
    mov x9, x30
    bl plat_my_core_pos
    mov x30, x9
-   ldr x1, =__PERCPU_TIMESTAMP_SIZE__
+   adr x2, __PMF_PERCPU_TIMESTAMP_END__
+   adr x1, __PMF_TIMESTAMP_START__
+   sub x1, x2, x1
    mov x2, #(\_tid * PMF_TS_SIZE)
    madd x0, x0, x1, x2
-   ldr x1, =pmf_ts_mem_\_name
+   adr x1, pmf_ts_mem_\_name
    add x0, x0, x1
    .endm
...
@@ -67,6 +67,29 @@ void zero_normalmem(void *mem, u_register_t length);
  * zeroing.
  */
 void zeromem(void *mem, u_register_t length);

+/*
+ * Utility function to return the address of a symbol. By default, the
+ * compiler generates an adr/adrp instruction pair to reference the symbol,
+ * and this utility is used to override that compiler-generated code with
+ * an `ldr` instruction.
+ *
+ * This helps when a Position Independent Executable needs to reference a
+ * symbol which is constant and does not depend on the execute address of
+ * the binary.
+ */
+#define DEFINE_LOAD_SYM_ADDR(_name)                             \
+static inline u_register_t load_addr_## _name(void)             \
+{                                                               \
+    u_register_t v;                                             \
+    /* Create a void reference to silence compiler */           \
+    (void) _name;                                               \
+    __asm__ volatile ("ldr %0, =" #_name : "=r" (v));           \
+    return v;                                                   \
+}
+
+/* Helper to invoke the function defined by DEFINE_LOAD_SYM_ADDR() */
+#define LOAD_ADDR_OF(_name) (typeof(_name) *) load_addr_## _name()

 #endif /* !(defined(__LINKER__) || defined(__ASSEMBLY__)) */
 #endif /* __UTILS_H__ */
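A hypothetical usage sketch of the two macros above; `shared_mailbox` is an invented symbol standing in for any address that should be taken as a link-time constant rather than recomputed PC-relatively:

    #include <utils.h>  /* DEFINE_LOAD_SYM_ADDR / LOAD_ADDR_OF */

    /* Invented symbol for illustration only. */
    extern unsigned long shared_mailbox;

    DEFINE_LOAD_SYM_ADDR(shared_mailbox)

    void demo(void)
    {
        /* Yields the address via an `ldr` literal, as described above. */
        unsigned long *mbox = LOAD_ADDR_OF(shared_mailbox);
        *mbox = 0UL;
    }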
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,6 +7,7 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <assert_macros.S>
+#include <xlat_tables_defs.h>

    .globl get_afflvl_shift
    .globl mpidr_mask_lower_afflvls
@@ -23,6 +24,8 @@
    .globl disable_mmu_icache_el1
    .globl disable_mmu_icache_el3

+   .globl fixup_gdt_reloc

 #if SUPPORT_VFP
    .globl enable_vfp
 #endif
@@ -497,3 +500,114 @@ func enable_vfp
    ret
 endfunc enable_vfp
 #endif
+/* ---------------------------------------------------------------------------
+ * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
+ * (.rela.dyn) at runtime.
+ *
+ * This function is meant to be used when the firmware is compiled with -fpie
+ * and linked with -pie options. We rely on the linker script exporting
+ * appropriate markers for the start and end of the section. For the GOT, we
+ * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
+ * __RELA_START__ and __RELA_END__.
+ *
+ * The function takes the limits of the memory to apply fixups to as
+ * arguments (which are usually the limits of the relocatable BL image):
+ * x0 - the start of the fixup region
+ * x1 - the limit of the fixup region
+ * These addresses have to be 4KB page-aligned.
+ * ---------------------------------------------------------------------------
+ */
+func fixup_gdt_reloc
+    mov x6, x0
+    mov x7, x1
+
+    /* Test if the limits are 4K aligned */
+#if ENABLE_ASSERTIONS
+    orr x0, x0, x1
+    tst x0, #(PAGE_SIZE - 1)
+    ASM_ASSERT(eq)
+#endif
+    /*
+     * Calculate the offset based on the return address in x30.
+     * Assume that this function is called within a page of the start of
+     * the fixup region.
+     */
+    and x2, x30, #~(PAGE_SIZE - 1)
+    sub x0, x2, x6  /* Diff(S) = Current Address - Compiled Address */
+
+    adrp x1, __GOT_START__
+    add x1, x1, :lo12:__GOT_START__
+    adrp x2, __GOT_END__
+    add x2, x2, :lo12:__GOT_END__
+
+    /*
+     * The GOT is an array of 64-bit addresses which must be fixed up as
+     * new_addr = old_addr + Diff(S).
+     * new_addr is the address the binary is currently executing from,
+     * and old_addr is the address at compile time.
+     */
+1:
+    ldr x3, [x1]
+    /* Skip adding offset if address is < lower limit */
+    cmp x3, x6
+    b.lo 2f
+    /* Skip adding offset if address is >= upper limit */
+    cmp x3, x7
+    b.ge 2f
+    add x3, x3, x0
+    str x3, [x1]
+2:
+    add x1, x1, #8
+    cmp x1, x2
+    b.lo 1b
+
+    /* Start processing the dynamic relocations. Use adrp/add to get
+     * RELA_START and RELA_END. */
+    adrp x1, __RELA_START__
+    add x1, x1, :lo12:__RELA_START__
+    adrp x2, __RELA_END__
+    add x2, x2, :lo12:__RELA_END__
+    /*
+     * According to the ELF-64 specification, the RELA data structure is
+     * as follows:
+     *     typedef struct {
+     *         Elf64_Addr r_offset;
+     *         Elf64_Xword r_info;
+     *         Elf64_Sxword r_addend;
+     *     } Elf64_Rela;
+     *
+     * r_offset is the address of the reference.
+     * r_info is the symbol index and type of relocation (in this case
+     * 0x403, which corresponds to R_AARCH64_RELATIVE).
+     * r_addend is the constant part of the expression.
+     *
+     * The size of the Elf64_Rela structure is 24 bytes.
+     */
+1:
+    /* Assert that the relocation type is R_AARCH64_RELATIVE */
+#if ENABLE_ASSERTIONS
+    ldr x3, [x1, #8]
+    cmp x3, #0x403
+    ASM_ASSERT(eq)
+#endif
+    ldr x3, [x1]       /* r_offset */
+    add x3, x0, x3
+    ldr x4, [x1, #16]  /* r_addend */
+
+    /* Skip adding offset if r_addend is < lower limit */
+    cmp x4, x6
+    b.lo 2f
+    /* Skip adding offset if r_addend is >= upper limit */
+    cmp x4, x7
+    b.ge 2f
+
+    add x4, x0, x4     /* Diff(S) + r_addend */
+    str x4, [x3]
+2:  add x1, x1, #24
+    cmp x1, x2
+    b.lo 1b
+    ret
+endfunc fixup_gdt_reloc
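For readers more comfortable in C, an illustrative rendering of the two loops above; this is not code from the patch, and the function name and signature are invented. It assumes the same linker-exported markers and that every entry in .rela.dyn is an R_AARCH64_RELATIVE relocation:

    #include <stdint.h>

    extern uint64_t __GOT_START__[], __GOT_END__[];

    typedef struct {
        uint64_t r_offset;  /* link-time address of the reference */
        uint64_t r_info;    /* 0x403 == R_AARCH64_RELATIVE */
        int64_t  r_addend;  /* link-time target address */
    } Elf64_Rela;

    extern Elf64_Rela __RELA_START__[], __RELA_END__[];

    /* base/limit: link-time bounds of the image; diff = runtime - link address. */
    static void fixup_sketch(uint64_t base, uint64_t limit, uint64_t diff)
    {
        /* Patch every GOT entry that points inside the image. */
        for (uint64_t *e = __GOT_START__; e < __GOT_END__; e++) {
            if (*e >= base && *e < limit)
                *e += diff;
        }
        /* Apply relocations: *(r_offset + diff) = r_addend + diff,
         * skipping addends that fall outside the image. */
        for (Elf64_Rela *r = __RELA_START__; r < __RELA_END__; r++) {
            uint64_t addend = (uint64_t)r->r_addend;
            if (addend >= base && addend < limit)
                *(uint64_t *)(r->r_offset + diff) = addend + diff;
        }
    }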
@@ -40,6 +40,15 @@ func aem_generic_cluster_pwr_dwn
    b dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+   bx lr
+endfunc aem_generic_errata_report
+#endif

 /* cpu_ops for Base AEM FVP */
 declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
    aem_generic_core_pwr_dwn, \
...
@@ -69,6 +69,15 @@ func cortex_a12_cluster_pwr_dwn
    b cortex_a12_disable_smp
 endfunc cortex_a12_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A12. Must follow AAPCS.
+ */
+func cortex_a12_errata_report
+   bx lr
+endfunc cortex_a12_errata_report
+#endif

 declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
    cortex_a12_reset_func, \
    cortex_a12_core_pwr_dwn, \
...
@@ -117,6 +117,15 @@ func cortex_a32_cluster_pwr_dwn
    b cortex_a32_disable_smp
 endfunc cortex_a32_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A32. Must follow AAPCS.
+ */
+func cortex_a32_errata_report
+   bx lr
+endfunc cortex_a32_errata_report
+#endif

 declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
    cortex_a32_reset_func, \
    cortex_a32_core_pwr_dwn, \
...
@@ -69,6 +69,15 @@ func cortex_a5_cluster_pwr_dwn
    b cortex_a5_disable_smp
 endfunc cortex_a5_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A5. Must follow AAPCS.
+ */
+func cortex_a5_errata_report
+   bx lr
+endfunc cortex_a5_errata_report
+#endif

 declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
    cortex_a5_reset_func, \
    cortex_a5_core_pwr_dwn, \
...
@@ -69,6 +69,15 @@ func cortex_a7_cluster_pwr_dwn
    b cortex_a7_disable_smp
 endfunc cortex_a7_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A7. Must follow AAPCS.
+ */
+func cortex_a7_errata_report
+   bx lr
+endfunc cortex_a7_errata_report
+#endif

 declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
    cortex_a7_reset_func, \
    cortex_a7_core_pwr_dwn, \
...
@@ -46,6 +46,15 @@ func aem_generic_cluster_pwr_dwn
    b dcsw_op_all
 endfunc aem_generic_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for AEM. Must follow AAPCS.
+ */
+func aem_generic_errata_report
+   ret
+endfunc aem_generic_errata_report
+#endif

 /* ---------------------------------------------
  * This function provides cpu specific
  * register information for crash reporting.
...
@@ -114,6 +114,16 @@ func cortex_a35_cluster_pwr_dwn
    b cortex_a35_disable_smp
 endfunc cortex_a35_cluster_pwr_dwn

+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A35. Must follow AAPCS.
+ */
+func cortex_a35_errata_report
+   ret
+endfunc cortex_a35_errata_report
+#endif

 /* ---------------------------------------------
  * This function provides cortex_a35 specific
  * register information for crash reporting.
...