Commit 402b3cf8 authored by Julius Werner

Switch AARCH32/AARCH64 to __aarch64__



NOTE: AARCH32/AARCH64 macros are now deprecated in favor of __aarch64__.

All common C compilers pre-define the same macros to signal which
architecture the code is being compiled for: __arm__ for AArch32 (and
earlier Arm architecture versions) and __aarch64__ for AArch64.
There's no need for TF-A
to define its own custom macros for this. In order to unify code with
the export headers (which use __aarch64__ to avoid another dependency),
let's deprecate the AARCH32 and AARCH64 macros and switch the code base
over to the pre-defined standard macro. (Since it is somewhat
unintuitive that __arm__ only means AArch32, let's standardize on only
using __aarch64__.)
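
As a minimal sketch of the preferred pattern (the machine_word_t type
and LINKER_FORMAT macro are hypothetical, for illustration only; the
format strings are the ones used in the diff below), test the
compiler-provided macro with the AArch64 case first and AArch32 in the
#else branch:

  #include <stdint.h>

  #ifdef __aarch64__
  typedef uint64_t machine_word_t;
  #define LINKER_FORMAT "elf64-littleaarch64"
  #else /* !__aarch64__, i.e. AArch32 (__arm__) */
  typedef uint32_t machine_word_t;
  #define LINKER_FORMAT "elf32-littlearm"
  #endif /* __aarch64__ */

To see which of these macros a given cross-compiler pre-defines, dump
its built-in macros, e.g.:
gcc -dM -E - </dev/null | grep -E '__arm__|__aarch64__'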

Change-Id: Ic77de4b052297d77f38fc95f95f65a8ee70cf200
Signed-off-by: Julius Werner <jwerner@chromium.org>
parent d5dfdeb6
@@ -69,7 +69,7 @@ typedef struct arm_tzc_regions_info {
void arm_setup_romlib(void);
#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
/*
* Use this macro to instantiate lock before it is used in below
* arm_lock_xxx() macros
@@ -102,7 +102,7 @@ void arm_setup_romlib(void);
#define arm_lock_get()
#define arm_lock_release()
#endif /* defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32)) */
#endif /* defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32)) */
#if ARM_RECOM_STATE_ID_ENC
/*
......
@@ -20,13 +20,13 @@
/*
* Platform binary types for linking
*/
#ifdef AARCH32
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
#else
#ifdef __aarch64__
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
#endif /* AARCH32 */
#else
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
#endif /* __aarch64__ */
/*
* Generic platform constants
......
@@ -18,9 +18,9 @@
#ifdef IMAGE_BL1
# define BL_STRING "BL1"
#elif defined(AARCH64) && defined(IMAGE_BL31)
#elif defined(__aarch64__) && defined(IMAGE_BL31)
# define BL_STRING "BL31"
#elif defined(AARCH32) && defined(IMAGE_BL32)
#elif !defined(__aarch64__) && defined(IMAGE_BL32)
# define BL_STRING "BL32"
#elif defined(IMAGE_BL2) && BL2_AT_EL3
# define BL_STRING "BL2"
......
@@ -167,10 +167,10 @@ void bakery_lock_get(bakery_lock_t *lock)
unsigned int their_bakery_data;
me = plat_my_core_pos();
#ifdef AARCH32
is_cached = read_sctlr() & SCTLR_C_BIT;
#else
#ifdef __aarch64__
is_cached = read_sctlr_el3() & SCTLR_C_BIT;
#else
is_cached = read_sctlr() & SCTLR_C_BIT;
#endif
/* Get a ticket */
@@ -228,10 +228,10 @@ void bakery_lock_get(bakery_lock_t *lock)
void bakery_lock_release(bakery_lock_t *lock)
{
bakery_info_t *my_bakery_info;
#ifdef AARCH32
unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
#else
#ifdef __aarch64__
unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
#else
unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
#endif
my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
......
@@ -176,7 +176,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
*/
if (!tee_validate_header(header)) {
INFO("Invalid OPTEE header, set legacy mode.\n");
#ifdef AARCH64
#ifdef __aarch64__
header_ep->args.arg0 = MODE_RW_64;
#else
header_ep->args.arg0 = MODE_RW_32;
@@ -222,7 +222,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
if (header->arch == 0) {
header_ep->args.arg0 = MODE_RW_32;
} else {
#ifdef AARCH64
#ifdef __aarch64__
header_ep->args.arg0 = MODE_RW_64;
#else
ERROR("Cannot boot an AArch64 OP-TEE\n");
......
@@ -619,53 +619,7 @@ int psci_validate_mpidr(u_register_t mpidr)
* This function determines the full entrypoint information for the requested
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
u_register_t ep_attr;
unsigned int aif, ee, mode;
u_register_t scr = read_scr();
u_register_t ns_sctlr, sctlr;
/* Switch to non secure state */
write_scr(scr | SCR_NS_BIT);
isb();
ns_sctlr = read_sctlr();
sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
/* Return to original state */
write_scr(scr);
isb();
ee = 0;
ep_attr = NON_SECURE | EP_ST_DISABLE;
if (sctlr & SCTLR_EE_BIT) {
ep_attr |= EP_EE_BIG;
ee = 1;
}
SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
ep->pc = entrypoint;
zeromem(&ep->args, sizeof(ep->args));
ep->args.arg0 = context_id;
mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
/*
* TODO: Choose async. exception bits if HYP mode is not
* implemented according to the values of SCR.{AW, FW} bits
*/
aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
return PSCI_E_SUCCESS;
}
#else
#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
@@ -722,7 +676,53 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
return PSCI_E_SUCCESS;
}
#endif
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
u_register_t ep_attr;
unsigned int aif, ee, mode;
u_register_t scr = read_scr();
u_register_t ns_sctlr, sctlr;
/* Switch to non secure state */
write_scr(scr | SCR_NS_BIT);
isb();
ns_sctlr = read_sctlr();
sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
/* Return to original state */
write_scr(scr);
isb();
ee = 0;
ep_attr = NON_SECURE | EP_ST_DISABLE;
if (sctlr & SCTLR_EE_BIT) {
ep_attr |= EP_EE_BIG;
ee = 1;
}
SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
ep->pc = entrypoint;
zeromem(&ep->args, sizeof(ep->args));
ep->args.arg0 = context_id;
mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
/*
* TODO: Choose async. exception bits if HYP mode is not
* implemented according to the values of SCR.{AW, FW} bits
*/
aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
return PSCI_E_SUCCESS;
}
#endif /* __aarch64__ */
/*******************************************************************************
* This function validates the entrypoint with the platform layer if the
......
@@ -136,48 +136,48 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
#endif
#ifdef AARCH32
#ifdef __aarch64__
void enable_mmu_svc_mon(unsigned int flags)
void enable_mmu_el1(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct_svc_mon(flags);
enable_mmu_direct_el1(flags);
}
void enable_mmu_hyp(unsigned int flags)
void enable_mmu_el2(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_hyp(flags);
enable_mmu_direct_el2(flags);
}
#else
void enable_mmu_el1(unsigned int flags)
void enable_mmu_el3(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct_el1(flags);
tf_xlat_ctx.va_max_address, EL3_REGIME);
enable_mmu_direct_el3(flags);
}
void enable_mmu_el2(unsigned int flags)
#else /* !__aarch64__ */
void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_el2(flags);
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct_svc_mon(flags);
}
void enable_mmu_el3(unsigned int flags)
void enable_mmu_hyp(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL3_REGIME);
enable_mmu_direct_el3(flags);
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_hyp(flags);
}
#endif /* AARCH32 */
#endif /* __aarch64__ */
@@ -97,7 +97,7 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
#ifdef AARCH64
#ifdef __aarch64__
/* Check Guarded Page bit */
if ((desc & GP) != 0ULL) {
printf("-GP");
......
@@ -273,7 +273,7 @@ $(eval IMAGE := IMAGE_BL$(call uppercase,$(3)))
$(1): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs
$$(ECHO) " PP $$<"
$$(Q)$$(CPP) $$(CPPFLAGS) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -D$(IMAGE) -o $$@ $$<
$$(Q)$$(CPP) $$(CPPFLAGS) $(TF_CFLAGS_$(ARCH)) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -D$(IMAGE) -o $$@ $$<
-include $(DEP)
......
@@ -81,7 +81,7 @@ const mmap_region_t plat_arm_mmap[] = {
MAP_DEVICE0,
MAP_DEVICE1,
ARM_MAP_NS_DRAM1,
#ifdef AARCH64
#ifdef __aarch64__
ARM_MAP_DRAM2,
#endif
#ifdef SPD_tspd
@@ -150,7 +150,7 @@ const mmap_region_t plat_arm_secure_partition_mmap[] = {
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_arm_mmap[] = {
#ifdef AARCH32
#ifndef __aarch64__
ARM_MAP_SHARED_RAM,
ARM_V2M_MAP_MEM_PROTECT,
#endif
......
@@ -120,7 +120,7 @@
#define PLAT_ARM_MAX_BL31_SIZE UL(0x3B000)
#endif
#ifdef AARCH32
#ifndef __aarch64__
/*
* Since BL32 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL32_SIZE is
* calculated using the current SP_MIN PROGBITS debug size plus the sizes of
@@ -259,7 +259,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifdef AARCH64
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -331,7 +331,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifdef AARCH64
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -291,7 +291,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifdef AARCH64
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -37,7 +37,7 @@ const mmap_region_t plat_arm_mmap[] = {
CSS_MAP_DEVICE,
SOC_CSS_MAP_DEVICE,
ARM_MAP_NS_DRAM1,
#ifdef AARCH64
#ifdef __aarch64__
ARM_MAP_DRAM2,
#endif
#ifdef SPD_tspd
@@ -74,7 +74,7 @@ const mmap_region_t plat_arm_mmap[] = {
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_arm_mmap[] = {
#ifdef AARCH32
#ifndef __aarch64__
ARM_MAP_SHARED_RAM,
#ifdef PLAT_ARM_MEM_PROT_ADDR
ARM_V2M_MAP_MEM_PROTECT,
......
@@ -34,7 +34,7 @@
* space the physical & virtual address space limits are extended to
* 40-bits.
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 40)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 40)
#else
......
@@ -29,7 +29,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -30,7 +30,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -30,7 +30,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -15,7 +15,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
......
@@ -121,11 +121,11 @@ void arm_bl1_plat_arch_setup(void)
};
setup_page_tables(bl_regions, plat_arm_get_mmap());
#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el3(0);
#endif /* AARCH32 */
#else
enable_mmu_svc_mon(0);
#endif /* __aarch64__ */
arm_setup_romlib();
}
......