Commit db9736e3 authored by Alexei Fedorov

AArch64: Fix assertions in processing dynamic relocations



This patch provides the following changes in the fixup_gdt_reloc()
function:
- Fixes assertions in processing dynamic relocations when relocation
  entries not matching the R_AARCH64_RELATIVE type are found. The
  linker might generate entries of relocation type R_AARCH64_NONE
  (code 0), which must be ignored for the code to boot. A similar
  issue was fixed in OP-TEE (see optee_os/ldelf/ta_elf_rel.c commit
  7a4dc765c133125428136a496a7644c6fec9b3c2). A C sketch of the fixed
  relocation loop follows the commit metadata below.
- Fixes a bug where the "b.ge" (signed greater than or equal)
  condition code was used instead of "b.hs" (unsigned higher or same)
  for comparison of absolute addresses; a short illustration follows
  the diff below.
- Adds an optimisation which skips fixing Global Offset Table (GOT)
  entries when the offset value is 0.

Change-Id: I35e34e055b7476843903859be947b883a1feb1b5
Signed-off-by: Alexei Fedorov <Alexei.Fedorov@arm.com>
parent 4811168a
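For readers who prefer C to assembly, here is a minimal sketch of the dynamic-relocation loop as the patched code behaves, assuming a bare-metal context where the image may write to its own data. The function name fixup_rela_dyn and its parameters (rela_start, rela_end, diff, lower, upper) are hypothetical and not part of the patch; only the relocation codes and the Elf64_Rela layout come from the diff below.

#include <assert.h>
#include <stdint.h>

/* Relocation codes, matching the #defines added by the patch. */
#define R_AARCH64_NONE		0
#define R_AARCH64_RELATIVE	1027

/* Elf64_Rela layout as described in the comment block of the diff (24 bytes). */
typedef struct {
	uint64_t r_offset;	/* address of reference */
	uint64_t r_info;	/* symbol index and type of relocation */
	int64_t  r_addend;	/* constant part of expression */
} Elf64_Rela;

/*
 * Hypothetical helper mirroring the patched loop. 'diff' is Diff(S), the
 * difference between the current (load) address and the compiled (link)
 * address; [lower, upper) is the fixup region passed in x6/x7. All address
 * comparisons are unsigned, matching the b.lo/b.hs condition codes.
 */
static void fixup_rela_dyn(const Elf64_Rela *rela_start,
			   const Elf64_Rela *rela_end,
			   uint64_t diff, uint64_t lower, uint64_t upper)
{
	if (diff == 0U) {
		return;			/* Diff(S) = 0: no relocation needed */
	}

	for (const Elf64_Rela *r = rela_start; r < rela_end; r++) {
		if (r->r_info == R_AARCH64_NONE) {
			continue;	/* skip instead of tripping the assert */
		}

		/* Any other entry is expected to be R_AARCH64_RELATIVE. */
		assert(r->r_info == R_AARCH64_RELATIVE);

		uint64_t addend = (uint64_t)r->r_addend;

		/* Skip entries whose target lies outside the fixup region. */
		if (addend < lower || addend >= upper) {
			continue;
		}

		/* Patch the word at (Diff(S) + r_offset) with Diff(S) + r_addend. */
		*(uint64_t *)(uintptr_t)(diff + r->r_offset) = diff + addend;
	}
}

The behavioural changes introduced by the patch correspond to the early return when diff is zero, the continue on R_AARCH64_NONE instead of a failed assertion, and the unsigned comparison against the region limit.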
 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -486,15 +486,20 @@ endfunc enable_vfp
  * arguments (which is usually the limits of the relocable BL image).
  * x0 - the start of the fixup region
  * x1 - the limit of the fixup region
- * These addresses have to be page (4KB aligned).
+ * These addresses have to be 4KB page aligned.
  * ---------------------------------------------------------------------------
  */
+
+/* Relocation codes */
+#define	R_AARCH64_NONE		0
+#define	R_AARCH64_RELATIVE	1027
+
 func fixup_gdt_reloc
 	mov	x6, x0
 	mov	x7, x1
-	/* Test if the limits are 4K aligned */
 #if ENABLE_ASSERTIONS
+	/* Test if the limits are 4KB aligned */
 	orr	x0, x0, x1
 	tst	x0, #(PAGE_SIZE_MASK)
 	ASM_ASSERT(eq)
@@ -505,7 +510,8 @@ func fixup_gdt_reloc
  * fixup region.
  */
 	and	x2, x30, #~(PAGE_SIZE_MASK)
-	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
+	subs	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
+	b.eq	3f		/* Diff(S) = 0. No relocation needed */
 	adrp	x1, __GOT_START__
 	add	x1, x1, :lo12:__GOT_START__
@@ -518,31 +524,32 @@ func fixup_gdt_reloc
  * The new_addr is the address currently the binary is executing from
  * and old_addr is the address at compile time.
  */
-1:
-	ldr	x3, [x1]
+1:	ldr	x3, [x1]
 	/* Skip adding offset if address is < lower limit */
 	cmp	x3, x6
 	b.lo	2f
 	/* Skip adding offset if address is >= upper limit */
 	cmp	x3, x7
-	b.ge	2f
+	b.hs	2f
 	add	x3, x3, x0
 	str	x3, [x1]
-2:
-	add	x1, x1, #8
+2:	add	x1, x1, #8
 	cmp	x1, x2
 	b.lo	1b
 	/* Starting dynamic relocations. Use adrp/adr to get RELA_START and END */
-	adrp	x1, __RELA_START__
+3:	adrp	x1, __RELA_START__
 	add	x1, x1, :lo12:__RELA_START__
 	adrp	x2, __RELA_END__
 	add	x2, x2, :lo12:__RELA_END__
 	/*
 	 * According to ELF-64 specification, the RELA data structure is as
 	 * follows:
-	 * typedef struct
-	 * {
+	 * typedef struct {
 	 *	Elf64_Addr r_offset;
 	 *	Elf64_Xword r_info;
 	 *	Elf64_Sxword r_addend;
@@ -550,16 +557,19 @@ func fixup_gdt_reloc
 	 *
 	 * r_offset is address of reference
 	 * r_info is symbol index and type of relocation (in this case
-	 * 0x403 which corresponds to R_AARCH64_RELATIVE).
+	 * code 1027 which corresponds to R_AARCH64_RELATIVE).
 	 * r_addend is constant part of expression.
 	 *
 	 * Size of Elf64_Rela structure is 24 bytes.
 	 */
-1:
-	/* Assert that the relocation type is R_AARCH64_RELATIVE */
+	/* Skip R_AARCH64_NONE entry with code 0 */
+1:	ldr	x3, [x1, #8]
+	cbz	x3, 2f
 #if ENABLE_ASSERTIONS
-	ldr	x3, [x1, #8]
-	cmp	x3, #0x403
+	/* Assert that the relocation type is R_AARCH64_RELATIVE */
+	cmp	x3, #R_AARCH64_RELATIVE
 	ASM_ASSERT(eq)
 #endif
 	ldr	x3, [x1]	/* r_offset */
@@ -569,9 +579,10 @@ func fixup_gdt_reloc
 	/* Skip adding offset if r_addend is < lower limit */
 	cmp	x4, x6
 	b.lo	2f
 	/* Skip adding offset if r_addend entry is >= upper limit */
 	cmp	x4, x7
-	b.ge	2f
+	b.hs	2f
 	add	x4, x0, x4	/* Diff(S) + r_addend */
 	str	x4, [x3]
@@ -579,6 +590,5 @@ func fixup_gdt_reloc
 2:	add	x1, x1, #24
 	cmp	x1, x2
 	b.lo	1b
 	ret
 endfunc fixup_gdt_reloc
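As a worked illustration of the b.ge to b.hs change in the two range checks above (the addresses below are hypothetical, not taken from any real memory map): once an address has bit 63 set, a signed comparison orders it below a smaller address whose top bit is clear, so the pre-patch code could apply a fixup to an entry outside the region.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr  = 0x8000000000001000ULL;	/* value being range-checked */
	uint64_t limit = 0x0000000100000000ULL;	/* upper limit of the fixup region */

	/* b.hs semantics (unsigned >=): addr is above the limit, so the
	 * fixup is correctly skipped. Prints 1. */
	printf("unsigned addr >= limit: %d\n", addr >= limit);

	/* b.ge semantics (signed >=): (int64_t)addr is negative, so the
	 * check fails and the pre-patch code would not skip. Prints 0. */
	printf("signed   addr >= limit: %d\n",
	       (int64_t)addr >= (int64_t)limit);

	return 0;
}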