Commit ea7fc9d1 authored by Sandrine Bailleux, committed by TrustedFirmware Code Review

Merge changes from topic "xlat" into integration

* changes:
  xlat_tables_v2: fix assembler warning of PLAT_RO_XLAT_TABLES
  linker_script: move bss section to bl_common.ld.h
  linker_script: replace common read-only data with RODATA_COMMON
  linker_script: move more common code to bl_common.ld.h
parents ed81fde0 268131c2
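
For orientation, the patches below replace per-image linker-script boilerplate with two helpers defined in the common/bl_common.ld.h header: RODATA_COMMON gathers the linker-emitted descriptor arrays and the GOT into the read-only area, and BSS_SECTION builds the zero-initialised area. A minimal sketch of how a BL image linker script uses them after this series (not a complete script; the RAM region name and exact section set vary per image) looks like:

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* rt_svc_descs, fconf_populator, pmf_svc_descs, img_parser_lib_descs,
         * cpu_ops, .got, and the base xlat table when PLAT_RO_XLAT_TABLES=1 */
        RODATA_COMMON

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM

    /* .bss plus bakery locks (when coherent memory is not used), PMF
     * timestamps, and the base xlat table when PLAT_RO_XLAT_TABLES=0 */
    BSS_SECTION >RAM

    XLAT_TABLE_SECTION >RAM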
@@ -4,8 +4,6 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include <platform_def.h>
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>

@@ -47,20 +45,7 @@ SECTIONS
     __RODATA_START__ = .;
     *(SORT_BY_ALIGNMENT(.rodata*))
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PARSER_LIB_DESCS_START__ = .;
-    KEEP(*(.img_parser_lib_descs))
-    __PARSER_LIB_DESCS_END__ = .;
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
+    RODATA_COMMON
     /*
      * No need to pad out the .rodata section to a page boundary. Next is

@@ -81,20 +66,7 @@ SECTIONS
     *(SORT_BY_ALIGNMENT(.text*))
     *(SORT_BY_ALIGNMENT(.rodata*))
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PARSER_LIB_DESCS_START__ = .;
-    KEEP(*(.img_parser_lib_descs))
-    __PARSER_LIB_DESCS_END__ = .;
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
+    RODATA_COMMON
     *(.vectors)
     __RO_END__ = .;

@@ -137,18 +109,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss : ALIGN(16) {
-        __BSS_START__ = .;
-        *(SORT_BY_ALIGNMENT(.bss*))
-        *(COMMON)
-        __BSS_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
 #if USE_COHERENT_MEM
...
@@ -4,8 +4,6 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include <platform_def.h>
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>

@@ -47,16 +45,7 @@ SECTIONS
     __RODATA_START__ = .;
     *(SORT_BY_ALIGNMENT(.rodata*))
-    . = ALIGN(8);
-    __FCONF_POPULATOR_START__ = .;
-    KEEP(*(.fconf_populator))
-    __FCONF_POPULATOR_END__ = .;
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PARSER_LIB_DESCS_START__ = .;
-    KEEP(*(.img_parser_lib_descs))
-    __PARSER_LIB_DESCS_END__ = .;
+    RODATA_COMMON
     . = ALIGN(PAGE_SIZE);
     __RODATA_END__ = .;

@@ -68,16 +57,7 @@ SECTIONS
     *(SORT_BY_ALIGNMENT(.text*))
     *(SORT_BY_ALIGNMENT(.rodata*))
-    . = ALIGN(8);
-    __FCONF_POPULATOR_START__ = .;
-    KEEP(*(.fconf_populator))
-    __FCONF_POPULATOR_END__ = .;
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PARSER_LIB_DESCS_START__ = .;
-    KEEP(*(.img_parser_lib_descs))
-    __PARSER_LIB_DESCS_END__ = .;
+    RODATA_COMMON
     *(.vectors)
     __RO_END_UNALIGNED__ = .;

@@ -114,18 +94,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss : ALIGN(16) {
-        __BSS_START__ = .;
-        *(SORT_BY_ALIGNMENT(.bss*))
-        *(COMMON)
-        __BSS_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
 #if USE_COHERENT_MEM
...
@@ -4,8 +4,6 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include <platform_def.h>
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>

@@ -55,30 +53,7 @@ SECTIONS
     __RODATA_START__ = .;
     *(SORT_BY_ALIGNMENT(.rodata*))
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PARSER_LIB_DESCS_START__ = .;
-    KEEP(*(.img_parser_lib_descs))
-    __PARSER_LIB_DESCS_END__ = .;
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
-    /*
-     * Keep the .got section in the RO section as it is patched
-     * prior to enabling the MMU and having the .got in RO is better for
-     * security. GOT is a table of addresses so ensure 8-byte alignment.
-     */
-    . = ALIGN(8);
-    __GOT_START__ = .;
-    *(.got)
-    __GOT_END__ = .;
+    RODATA_COMMON
     . = ALIGN(PAGE_SIZE);
     __RODATA_END__ = .;

@@ -96,30 +71,7 @@ SECTIONS
     *(SORT_BY_ALIGNMENT(.text*))
     *(SORT_BY_ALIGNMENT(.rodata*))
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PARSER_LIB_DESCS_START__ = .;
-    KEEP(*(.img_parser_lib_descs))
-    __PARSER_LIB_DESCS_END__ = .;
-    /*
-     * Keep the .got section in the RO section as it is patched
-     * prior to enabling the MMU and having the .got in RO is better for
-     * security. GOT is a table of addresses so ensure 8-byte alignment.
-     */
-    . = ALIGN(8);
-    __GOT_START__ = .;
-    *(.got)
-    __GOT_END__ = .;
+    RODATA_COMMON
     *(.vectors)
     __RO_END_UNALIGNED__ = .;

@@ -177,18 +129,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss : ALIGN(16) {
-        __BSS_START__ = .;
-        *(SORT_BY_ALIGNMENT(.bss*))
-        *(COMMON)
-        __BSS_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
 #if USE_COHERENT_MEM
...
@@ -46,6 +46,9 @@ SECTIONS
     .rodata . : {
         __RODATA_START__ = .;
         *(SORT_BY_ALIGNMENT(.rodata*))
+        RODATA_COMMON
         . = ALIGN(PAGE_SIZE);
         __RODATA_END__ = .;
     } >RAM

@@ -56,6 +59,8 @@ SECTIONS
     *(SORT_BY_ALIGNMENT(.text*))
     *(SORT_BY_ALIGNMENT(.rodata*))
+    RODATA_COMMON
     *(.vectors)
     __RO_END_UNALIGNED__ = .;
     /*

@@ -91,18 +96,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss : ALIGN(16) {
-        __BSS_START__ = .;
-        *(SORT_BY_ALIGNMENT(.bss*))
-        *(COMMON)
-        __BSS_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
 #if USE_COHERENT_MEM
...
@@ -4,8 +4,6 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include <platform_def.h>
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>

@@ -49,43 +47,7 @@ SECTIONS
     __RODATA_START__ = .;
     *(SORT_BY_ALIGNMENT(.rodata*))
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __RT_SVC_DESCS_START__ = .;
-    KEEP(*(rt_svc_descs))
-    __RT_SVC_DESCS_END__ = .;
-    . = ALIGN(8);
-    __FCONF_POPULATOR_START__ = .;
-    KEEP(*(.fconf_populator))
-    __FCONF_POPULATOR_END__ = .;
-#if ENABLE_PMF
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PMF_SVC_DESCS_START__ = .;
-    KEEP(*(pmf_svc_descs))
-    __PMF_SVC_DESCS_END__ = .;
-#endif /* ENABLE_PMF */
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
-    /*
-     * Keep the .got section in the RO section as it is patched
-     * prior to enabling the MMU and having the .got in RO is better for
-     * security. GOT is a table of addresses so ensure 8-byte alignment.
-     */
-    . = ALIGN(8);
-    __GOT_START__ = .;
-    *(.got)
-    __GOT_END__ = .;
+    RODATA_COMMON
     /* Place pubsub sections for events */
     . = ALIGN(8);

@@ -101,43 +63,7 @@ SECTIONS
     *(SORT_BY_ALIGNMENT(.text*))
     *(SORT_BY_ALIGNMENT(.rodata*))
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __RT_SVC_DESCS_START__ = .;
-    KEEP(*(rt_svc_descs))
-    __RT_SVC_DESCS_END__ = .;
-    . = ALIGN(8);
-    __FCONF_POPULATOR_START__ = .;
-    KEEP(*(.fconf_populator))
-    __FCONF_POPULATOR_END__ = .;
-#if ENABLE_PMF
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __PMF_SVC_DESCS_START__ = .;
-    KEEP(*(pmf_svc_descs))
-    __PMF_SVC_DESCS_END__ = .;
-#endif /* ENABLE_PMF */
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
-    /*
-     * Keep the .got section in the RO section as it is patched
-     * prior to enabling the MMU and having the .got in RO is better for
-     * security. GOT is a table of addresses so ensure 8-byte alignment.
-     */
-    . = ALIGN(8);
-    __GOT_START__ = .;
-    *(.got)
-    __GOT_END__ = .;
+    RODATA_COMMON
     /* Place pubsub sections for events */
     . = ALIGN(8);

@@ -238,66 +164,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >NOBITS
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss (NOLOAD) : ALIGN(16) {
-        __BSS_START__ = .;
-        *(SORT_BY_ALIGNMENT(.bss*))
-        *(COMMON)
-#if !USE_COHERENT_MEM
-        /*
-         * Bakery locks are stored in normal .bss memory
-         *
-         * Each lock's data is spread across multiple cache lines, one per CPU,
-         * but multiple locks can share the same cache line.
-         * The compiler will allocate enough memory for one CPU's bakery locks,
-         * the remaining cache lines are allocated by the linker script
-         */
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __BAKERY_LOCK_START__ = .;
-        __PERCPU_BAKERY_LOCK_START__ = .;
-        *(bakery_lock)
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PERCPU_BAKERY_LOCK_END__ = .;
-        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
-        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
-        __BAKERY_LOCK_END__ = .;
-        /*
-         * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
-         * will be zero. For this reason, the only two valid values for
-         * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
-         * PLAT_PERCPU_BAKERY_LOCK_SIZE.
-         */
-#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
-        ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
-            "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
-#endif
-#endif
-#if ENABLE_PMF
-        /*
-         * Time-stamps are stored in normal .bss memory
-         *
-         * The compiler will allocate enough memory for one CPU's time-stamps,
-         * the remaining memory for other CPUs is allocated by the
-         * linker script
-         */
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PMF_TIMESTAMP_START__ = .;
-        KEEP(*(pmf_timestamp_array))
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PMF_PERCPU_TIMESTAMP_END__ = .;
-        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
-        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
-        __PMF_TIMESTAMP_END__ = .;
-#endif /* ENABLE_PMF */
-        __BSS_END__ = .;
-    } >NOBITS
+    BSS_SECTION >NOBITS
     XLAT_TABLE_SECTION >NOBITS
 #if USE_COHERENT_MEM
...
@@ -4,8 +4,6 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#include <platform_def.h>
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>

@@ -50,33 +48,7 @@ SECTIONS
     __RODATA_START__ = .;
     *(.rodata*)
-    /* Ensure 4-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(4);
-    __RT_SVC_DESCS_START__ = .;
-    KEEP(*(rt_svc_descs))
-    __RT_SVC_DESCS_END__ = .;
-    . = ALIGN(4);
-    __FCONF_POPULATOR_START__ = .;
-    KEEP(*(.fconf_populator))
-    __FCONF_POPULATOR_END__ = .;
-#if ENABLE_PMF
-    /* Ensure 4-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(4);
-    __PMF_SVC_DESCS_START__ = .;
-    KEEP(*(pmf_svc_descs))
-    __PMF_SVC_DESCS_END__ = .;
-#endif /* ENABLE_PMF */
-    /*
-     * Ensure 4-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(4);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
+    RODATA_COMMON
     /* Place pubsub sections for events */
     . = ALIGN(8);

@@ -92,25 +64,7 @@ SECTIONS
     *(.text*)
     *(.rodata*)
-    /* Ensure 4-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(4);
-    __RT_SVC_DESCS_START__ = .;
-    KEEP(*(rt_svc_descs))
-    __RT_SVC_DESCS_END__ = .;
-    . = ALIGN(4);
-    __FCONF_POPULATOR_START__ = .;
-    KEEP(*(.fconf_populator))
-    __FCONF_POPULATOR_END__ = .;
-    /*
-     * Ensure 4-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(4);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
+    RODATA_COMMON
     /* Place pubsub sections for events */
     . = ALIGN(8);

@@ -153,60 +107,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 8-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss (NOLOAD) : ALIGN(8) {
-        __BSS_START__ = .;
-        *(.bss*)
-        *(COMMON)
-#if !USE_COHERENT_MEM
-        /*
-         * Bakery locks are stored in normal .bss memory
-         *
-         * Each lock's data is spread across multiple cache lines, one per CPU,
-         * but multiple locks can share the same cache line.
-         * The compiler will allocate enough memory for one CPU's bakery locks,
-         * the remaining cache lines are allocated by the linker script
-         */
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __BAKERY_LOCK_START__ = .;
-        __PERCPU_BAKERY_LOCK_START__ = .;
-        *(bakery_lock)
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PERCPU_BAKERY_LOCK_END__ = .;
-        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
-        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
-        __BAKERY_LOCK_END__ = .;
-#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
-        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
-            "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
-#endif
-#endif
-#if ENABLE_PMF
-        /*
-         * Time-stamps are stored in normal .bss memory
-         *
-         * The compiler will allocate enough memory for one CPU's time-stamps,
-         * the remaining memory for other CPUs is allocated by the
-         * linker script
-         */
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PMF_TIMESTAMP_START__ = .;
-        KEEP(*(pmf_timestamp_array))
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PMF_PERCPU_TIMESTAMP_END__ = .;
-        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
-        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
-        __PMF_TIMESTAMP_END__ = .;
-#endif /* ENABLE_PMF */
-        __BSS_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
     __BSS_SIZE__ = SIZEOF(.bss);
...
@@ -6,7 +6,6 @@
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>
-#include <platform_def.h>
 OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
 OUTPUT_ARCH(PLATFORM_LINKER_ARCH)

@@ -38,15 +37,7 @@ SECTIONS
     __RODATA_START__ = .;
     *(.rodata*)
-    /*
-     * Keep the .got section in the RO section as it is patched
-     * prior to enabling the MMU and having the .got in RO is better for
-     * security. GOT is a table of addresses so ensure 8-byte alignment.
-     */
-    . = ALIGN(8);
-    __GOT_START__ = .;
-    *(.got)
-    __GOT_END__ = .;
+    RODATA_COMMON
     . = ALIGN(PAGE_SIZE);
     __RODATA_END__ = .;

@@ -58,15 +49,7 @@ SECTIONS
     *(.text*)
     *(.rodata*)
-    /*
-     * Keep the .got section in the RO section as it is patched
-     * prior to enabling the MMU and having the .got in RO is better for
-     * security. GOT is a table of addresses so ensure 8-byte alignment.
-     */
-    . = ALIGN(8);
-    __GOT_START__ = .;
-    *(.got)
-    __GOT_END__ = .;
+    RODATA_COMMON
     *(.vectors)

@@ -114,18 +97,7 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss : ALIGN(16) {
-        __BSS_START__ = .;
-        *(SORT_BY_ALIGNMENT(.bss*))
-        *(COMMON)
-        __BSS_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
     XLAT_TABLE_SECTION >RAM
 #if USE_COHERENT_MEM
...
@@ -7,6 +7,164 @@
 #ifndef BL_COMMON_LD_H
 #define BL_COMMON_LD_H
+
+#include <platform_def.h>
+
+#ifdef __aarch64__
+#define STRUCT_ALIGN	8
+#define BSS_ALIGN	16
+#else
+#define STRUCT_ALIGN	4
+#define BSS_ALIGN	8
+#endif
+
+#define CPU_OPS \
+	. = ALIGN(STRUCT_ALIGN); \
+	__CPU_OPS_START__ = .; \
+	KEEP(*(cpu_ops)) \
+	__CPU_OPS_END__ = .;
+
+#define PARSER_LIB_DESCS \
+	. = ALIGN(STRUCT_ALIGN); \
+	__PARSER_LIB_DESCS_START__ = .; \
+	KEEP(*(.img_parser_lib_descs)) \
+	__PARSER_LIB_DESCS_END__ = .;
+
+#define RT_SVC_DESCS \
+	. = ALIGN(STRUCT_ALIGN); \
+	__RT_SVC_DESCS_START__ = .; \
+	KEEP(*(rt_svc_descs)) \
+	__RT_SVC_DESCS_END__ = .;
+
+#define PMF_SVC_DESCS \
+	. = ALIGN(STRUCT_ALIGN); \
+	__PMF_SVC_DESCS_START__ = .; \
+	KEEP(*(pmf_svc_descs)) \
+	__PMF_SVC_DESCS_END__ = .;
+
+#define FCONF_POPULATOR \
+	. = ALIGN(STRUCT_ALIGN); \
+	__FCONF_POPULATOR_START__ = .; \
+	KEEP(*(.fconf_populator)) \
+	__FCONF_POPULATOR_END__ = .;
+
+/*
+ * Keep the .got section in the RO section as it is patched prior to enabling
+ * the MMU and having the .got in RO is better for security. GOT is a table of
+ * addresses so ensure pointer size alignment.
+ */
+#define GOT \
+	. = ALIGN(STRUCT_ALIGN); \
+	__GOT_START__ = .; \
+	*(.got) \
+	__GOT_END__ = .;
+
+/*
+ * The base xlat table
+ *
+ * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
+ * or into the bss section otherwise.
+ */
+#define BASE_XLAT_TABLE \
+	. = ALIGN(16); \
+	*(base_xlat_table)
+
+#if PLAT_RO_XLAT_TABLES
+#define BASE_XLAT_TABLE_RO		BASE_XLAT_TABLE
+#define BASE_XLAT_TABLE_BSS
+#else
+#define BASE_XLAT_TABLE_RO
+#define BASE_XLAT_TABLE_BSS		BASE_XLAT_TABLE
+#endif
+
+#define RODATA_COMMON \
+	RT_SVC_DESCS \
+	FCONF_POPULATOR \
+	PMF_SVC_DESCS \
+	PARSER_LIB_DESCS \
+	CPU_OPS \
+	GOT \
+	BASE_XLAT_TABLE_RO
+
+#define STACK_SECTION \
+	stacks (NOLOAD) : { \
+	__STACKS_START__ = .; \
+	*(tzfw_normal_stacks) \
+	__STACKS_END__ = .; \
+	}
+
+/*
+ * If BL doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
+ * will be zero. For this reason, the only two valid values for
+ * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
+ * PLAT_PERCPU_BAKERY_LOCK_SIZE.
+ */
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+#define BAKERY_LOCK_SIZE_CHECK \
+	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || \
+	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
+	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#else
+#define BAKERY_LOCK_SIZE_CHECK
+#endif
+
+/*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+ * The compiler will allocate enough memory for one CPU's bakery locks,
+ * the remaining cache lines are allocated by the linker script
+ */
+#if !USE_COHERENT_MEM
+#define BAKERY_LOCK_NORMAL \
+	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
+	__BAKERY_LOCK_START__ = .; \
+	__PERCPU_BAKERY_LOCK_START__ = .; \
+	*(bakery_lock) \
+	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
+	__PERCPU_BAKERY_LOCK_END__ = .; \
+	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
+	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
+	__BAKERY_LOCK_END__ = .; \
+	BAKERY_LOCK_SIZE_CHECK
+#else
+#define BAKERY_LOCK_NORMAL
+#endif
+
+/*
+ * Time-stamps are stored in normal .bss memory
+ *
+ * The compiler will allocate enough memory for one CPU's time-stamps,
+ * the remaining memory for other CPUs is allocated by the
+ * linker script
+ */
+#define PMF_TIMESTAMP \
+	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
+	__PMF_TIMESTAMP_START__ = .; \
+	KEEP(*(pmf_timestamp_array)) \
+	. = ALIGN(CACHE_WRITEBACK_GRANULE); \
+	__PMF_PERCPU_TIMESTAMP_END__ = .; \
+	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
+	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
+	__PMF_TIMESTAMP_END__ = .;
+
+/*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address has bigger alignment for better performance of the
+ * zero-initialization code.
+ */
+#define BSS_SECTION \
+	.bss (NOLOAD) : ALIGN(BSS_ALIGN) { \
+	__BSS_START__ = .; \
+	*(SORT_BY_ALIGNMENT(.bss*)) \
+	*(COMMON) \
+	BAKERY_LOCK_NORMAL \
+	PMF_TIMESTAMP \
+	BASE_XLAT_TABLE_BSS \
+	__BSS_END__ = .; \
+	}
+
 /*
  * The xlat_table section is for full, aligned page tables (4K).
  * Removing them from .bss avoids forcing 4K alignment on
...
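As a sanity check on the consolidation, expanding BSS_SECTION >RAM by hand from the macros above, assuming an AArch64 image (BSS_ALIGN is 16), USE_COHERENT_MEM=1 (so BAKERY_LOCK_NORMAL is empty) and PLAT_RO_XLAT_TABLES=1 (so BASE_XLAT_TABLE_BSS is empty), gives roughly the .bss output section the individual linker scripts used to spell out:

    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        /* BAKERY_LOCK_NORMAL expands to nothing here (USE_COHERENT_MEM=1) */
        /* PMF_TIMESTAMP: per-CPU timestamp storage, padded out per core */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
        /* BASE_XLAT_TABLE_BSS expands to nothing here (PLAT_RO_XLAT_TABLES=1) */
        __BSS_END__ = .;
    } >RAM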
@@ -164,20 +164,15 @@ typedef struct xlat_ctx xlat_ctx_t;
  *   Would typically be PLAT_VIRT_ADDR_SPACE_SIZE
  *   (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
  *   BL image currently executing.
- * _base_table_section:
- *   Specify the name of the section where the base translation tables have to
- *   be placed by the linker.
  */
 #define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
-			_virt_addr_space_size, _phy_addr_space_size, \
-			_base_table_section) \
+			_virt_addr_space_size, _phy_addr_space_size) \
 	REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
 					(_xlat_tables_count), \
 					(_virt_addr_space_size), \
 					(_phy_addr_space_size), \
 					EL_REGIME_INVALID, \
-					"xlat_table", (_base_table_section))
+					"xlat_table", "base_xlat_table")

 /*
  * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
...
@@ -25,15 +25,8 @@ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
  * Allocate and initialise the default translation context for the BL image
  * currently executing.
  */
-#if PLAT_RO_XLAT_TABLES
-#define BASE_XLAT_TABLE_SECTION	".rodata"
-#else
-#define BASE_XLAT_TABLE_SECTION	".bss"
-#endif
 REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
-		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
-		      BASE_XLAT_TABLE_SECTION);
+		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
 		     unsigned int attr)
...
@@ -6,7 +6,6 @@
 #include <common/bl_common.ld.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>
-#include <platform_def.h>
 OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
 OUTPUT_ARCH(PLATFORM_LINKER_ARCH)

@@ -39,20 +38,7 @@ SECTIONS
     *(.text*)
     *(.rodata*)
-    /* Ensure 8-byte alignment for descriptors and ensure inclusion */
-    . = ALIGN(8);
-    __RT_SVC_DESCS_START__ = .;
-    KEEP(*(rt_svc_descs))
-    __RT_SVC_DESCS_END__ = .;
-    /*
-     * Ensure 8-byte alignment for cpu_ops so that its fields are also
-     * aligned. Also ensure cpu_ops inclusion.
-     */
-    . = ALIGN(8);
-    __CPU_OPS_START__ = .;
-    KEEP(*(cpu_ops))
-    __CPU_OPS_END__ = .;
+    RODATA_COMMON
     __RO_END_UNALIGNED__ = .;
     /*

@@ -94,41 +80,8 @@ SECTIONS
         __STACKS_END__ = .;
     } >RAM
-    /*
-     * The .bss section gets initialised to 0 at runtime.
-     * Its base address should be 16-byte aligned for better performance of the
-     * zero-initialization code.
-     */
-    .bss (NOLOAD) : ALIGN(16) {
-        __BSS_START__ = .;
-        *(.bss*)
-        *(COMMON)
-#if !USE_COHERENT_MEM
-        /*
-         * Bakery locks are stored in normal .bss memory
-         *
-         * Each lock's data is spread across multiple cache lines, one per CPU,
-         * but multiple locks can share the same cache line.
-         * The compiler will allocate enough memory for one CPU's bakery locks,
-         * the remaining cache lines are allocated by the linker script
-         */
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __BAKERY_LOCK_START__ = .;
-        __PERCPU_BAKERY_LOCK_START__ = .;
-        *(bakery_lock)
-        . = ALIGN(CACHE_WRITEBACK_GRANULE);
-        __PERCPU_BAKERY_LOCK_END__ = .;
-        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
-        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
-        __BAKERY_LOCK_END__ = .;
-#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
-        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
-            "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
-#endif
-#endif
-        __BSS_END__ = .;
-        __RW_END__ = .;
-    } >RAM
+    BSS_SECTION >RAM
+    __RW_END__ = __BSS_END__;
 ASSERT(. <= BL31_LIMIT, "BL3-1 image has exceeded its limit.")
...