Commit 402b3cf8 authored by Julius Werner

Switch AARCH32/AARCH64 to __aarch64__



NOTE: AARCH32/AARCH64 macros are now deprecated in favor of __aarch64__.

All common C compilers pre-define the same macros to signal which
architecture the code is being compiled for: __arm__ for AArch32 (or
earlier versions) and __aarch64__ for AArch64. There's no need for TF-A
to define its own custom macros for this. In order to unify code with
the export headers (which use __aarch64__ to avoid another dependency),
let's deprecate the AARCH32 and AARCH64 macros and switch the code base
over to the pre-defined standard macro. (Since it is somewhat
unintuitive that __arm__ only means AArch32, let's standardize on only
using __aarch64__.)
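
As a minimal standalone sketch of the resulting pattern (not part of this
patch; the function name is hypothetical), a per-architecture check now
reduces to:

/* Hypothetical illustration, not taken from the TF-A tree. It relies only
 * on compiler-builtin predefines: __aarch64__ is set when targeting
 * AArch64, while AArch32 toolchains predefine __arm__ instead. */

/* Width in bits of a general-purpose register for the current build. */
static inline unsigned int reg_width_bits(void)
{
#ifdef __aarch64__
	return 64U;	/* previously guarded by the TF-A-specific AARCH64 macro */
#else
	return 32U;	/* AArch32 build: __arm__ is predefined, __aarch64__ is not */
#endif
}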

Change-Id: Ic77de4b052297d77f38fc95f95f65a8ee70cf200
Signed-off-by: Julius Werner <jwerner@chromium.org>
parent d5dfdeb6
@@ -734,12 +734,6 @@ else
 $(eval $(call add_define,PRELOADED_BL33_BASE))
 endif
 endif
-# Define the AARCH32/AARCH64 flag based on the ARCH flag
-ifeq (${ARCH},aarch32)
-$(eval $(call add_define,AARCH32))
-else
-$(eval $(call add_define,AARCH64))
-endif
 # Define the DYN_DISABLE_AUTH flag only if set.
 ifeq (${DYN_DISABLE_AUTH},1)
@@ -771,6 +765,12 @@ else
 endif
 # __ASSEMBLY__ is deprecated in favor of the compiler-builtin __ASSEMBLER__.
 ASFLAGS += -D__ASSEMBLY__
+# AARCH32/AARCH64 macros are deprecated in favor of the compiler-builtin __aarch64__.
+ifeq (${ARCH},aarch32)
+$(eval $(call add_define,AARCH32))
+else
+$(eval $(call add_define,AARCH64))
+endif
 endif # !ERROR_DEPRECATED
 $(eval $(call MAKE_LIB_DIRS))
...
@@ -520,7 +520,7 @@ static int bl1_fwu_image_execute(unsigned int image_id,
 INFO("BL1-FWU: Executing Secure image\n");
-#ifdef AARCH64
+#ifdef __aarch64__
 /* Save NS-EL1 system registers. */
 cm_el1_sysregs_context_save(NON_SECURE);
 #endif
@@ -531,7 +531,7 @@ static int bl1_fwu_image_execute(unsigned int image_id,
 /* Update the secure image id. */
 sec_exec_image_id = image_id;
-#ifdef AARCH64
+#ifdef __aarch64__
 *handle = cm_get_context(SECURE);
 #else
 *handle = smc_get_ctx(SECURE);
@@ -584,7 +584,7 @@ static register_t bl1_fwu_image_resume(register_t image_param,
 INFO("BL1-FWU: Resuming %s world context\n",
 (resume_sec_state == SECURE) ? "secure" : "normal");
-#ifdef AARCH64
+#ifdef __aarch64__
 /* Save the EL1 system registers of calling world. */
 cm_el1_sysregs_context_save(caller_sec_state);
@@ -641,7 +641,7 @@ static int bl1_fwu_sec_image_done(void **handle, unsigned int flags)
 sec_exec_image_id = INVALID_IMAGE_ID;
 INFO("BL1-FWU: Resuming Normal world context\n");
-#ifdef AARCH64
+#ifdef __aarch64__
 /*
 * Secure world is done so no need to save the context.
 * Just restore the Non-Secure context.
...
@@ -59,7 +59,7 @@ void bl1_setup(void)
 /* Perform early platform-specific setup */
 bl1_early_platform_setup();
-#ifdef AARCH64
+#ifdef __aarch64__
 /*
 * Update pointer authentication key before the MMU is enabled. It is
 * saved in the rodata section, that can be writen before enabling the
@@ -67,7 +67,7 @@ void bl1_setup(void)
 * in the early platform setup.
 */
 bl_handle_pauth();
-#endif /* AARCH64 */
+#endif /* __aarch64__ */
 /* Perform late platform-specific setup */
 bl1_plat_arch_setup();
@@ -97,10 +97,10 @@ void bl1_main(void)
 /*
 * Ensure that MMU/Caches and coherency are turned on
 */
-#ifdef AARCH32
-val = read_sctlr();
-#else
+#ifdef __aarch64__
 val = read_sctlr_el3();
+#else
+val = read_sctlr();
 #endif
 assert(val & SCTLR_M_BIT);
 assert(val & SCTLR_C_BIT);
@@ -198,11 +198,11 @@ static void bl1_load_bl2(void)
 ******************************************************************************/
 void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
 {
-#ifdef AARCH32
-NOTICE("BL1: Booting BL32\n");
-#else
+#ifdef __aarch64__
 NOTICE("BL1: Booting BL31\n");
-#endif /* AARCH32 */
+#else
+NOTICE("BL1: Booting BL32\n");
+#endif /* __aarch64__ */
 print_entry_point_info(bl_ep_info);
 }
...
@@ -15,10 +15,10 @@
 #include "bl2_private.h"
-#ifdef AARCH32
-#define NEXT_IMAGE "BL32"
-#else
+#ifdef __aarch64__
 #define NEXT_IMAGE "BL31"
+#else
+#define NEXT_IMAGE "BL32"
 #endif
 #if !BL2_AT_EL3
@@ -31,7 +31,7 @@ void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
 /* Perform early platform-specific setup */
 bl2_early_platform_setup2(arg0, arg1, arg2, arg3);
-#ifdef AARCH64
+#ifdef __aarch64__
 /*
 * Update pointer authentication key before the MMU is enabled. It is
 * saved in the rodata section, that can be writen before enabling the
@@ -39,7 +39,7 @@ void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
 * in the early platform setup.
 */
 bl_handle_pauth();
-#endif /* AARCH64 */
+#endif /* __aarch64__ */
 /* Perform late platform-specific setup */
 bl2_plat_arch_setup();
@@ -55,7 +55,7 @@ void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
 /* Perform early platform-specific setup */
 bl2_el3_early_platform_setup(arg0, arg1, arg2, arg3);
-#ifdef AARCH64
+#ifdef __aarch64__
 /*
 * Update pointer authentication key before the MMU is enabled. It is
 * saved in the rodata section, that can be writen before enabling the
@@ -63,7 +63,7 @@ void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
 * in the early platform setup.
 */
 bl_handle_pauth();
-#endif /* AARCH64 */
+#endif /* __aarch64__ */
 /* Perform late platform-specific setup */
 bl2_el3_plat_arch_setup();
@@ -97,14 +97,14 @@ void bl2_main(void)
 next_bl_ep_info = bl2_load_images();
 #if !BL2_AT_EL3
-#ifdef AARCH32
+#ifndef __aarch64__
 /*
 * For AArch32 state BL1 and BL2 share the MMU setup.
 * Given that BL2 does not map BL1 regions, MMU needs
 * to be disabled in order to go back to BL1.
 */
 disable_mmu_icache_secure();
-#endif /* AARCH32 */
+#endif /* !__aarch64__ */
 console_flush();
...
@@ -45,14 +45,14 @@ void bl2u_main(void)
 console_flush();
-#ifdef AARCH32
+#ifndef __aarch64__
 /*
 * For AArch32 state BL1 and BL2U share the MMU setup.
 * Given that BL2U does not map BL1 regions, MMU needs
 * to be disabled in order to go back to BL1.
 */
 disable_mmu_icache_secure();
-#endif /* AARCH32 */
+#endif /* !__aarch64__ */
 /*
 * Indicate that BL2U is done and resume back to
...
@@ -93,7 +93,7 @@ static const char *get_el_str(unsigned int el)
 * Returns true if the address points to a virtual address that can be read at
 * the current EL, false otherwise.
 */
-#ifdef AARCH64
+#ifdef __aarch64__
 static bool is_address_readable(uintptr_t addr)
 {
 unsigned int el = get_current_el();
@@ -123,7 +123,7 @@ static bool is_address_readable(uintptr_t addr)
 return true;
 }
-#else /* if AARCH32 */
+#else /* !__aarch64__ */
 static bool is_address_readable(uintptr_t addr)
 {
 unsigned int el = get_current_el();
@@ -144,7 +144,7 @@ static bool is_address_readable(uintptr_t addr)
 return true;
 }
-#endif
+#endif /* __aarch64__ */
 /*
 * Returns true if all the bytes in a given object are in mapped memory and an
@@ -207,7 +207,7 @@ static bool is_valid_frame_record(struct frame_record *fr)
 */
 static struct frame_record *adjust_frame_record(struct frame_record *fr)
 {
-#ifdef AARCH64
+#ifdef __aarch64__
 return fr;
 #else
 return (struct frame_record *)((uintptr_t)fr - 4U);
...
@@ -236,7 +236,7 @@ void print_entry_point_info(const entry_point_info_t *ep_info)
 PRINT_IMAGE_ARG(1);
 PRINT_IMAGE_ARG(2);
 PRINT_IMAGE_ARG(3);
-#ifndef AARCH32
+#ifdef __aarch64__
 PRINT_IMAGE_ARG(4);
 PRINT_IMAGE_ARG(5);
 PRINT_IMAGE_ARG(6);
@@ -245,7 +245,7 @@ void print_entry_point_info(const entry_point_info_t *ep_info)
 #undef PRINT_IMAGE_ARG
 }
-#ifdef AARCH64
+#ifdef __aarch64__
 /*******************************************************************************
 * Handle all possible cases regarding ARMv8.3-PAuth.
 ******************************************************************************/
@@ -293,4 +293,4 @@ void bl_handle_pauth(void)
 #endif /* ENABLE_PAUTH */
 }
-#endif /* AARCH64 */
+#endif /* __aarch64__ */
@@ -17,7 +17,7 @@
 #include "ccn_private.h"
 static const ccn_desc_t *ccn_plat_desc;
-#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
 DEFINE_BAKERY_LOCK(ccn_lock);
 #endif
@@ -264,7 +264,7 @@ static void ccn_snoop_dvm_do_op(unsigned long long rn_id_map,
 assert(ccn_plat_desc);
 assert(ccn_plat_desc->periphbase);
-#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
 bakery_lock_get(&ccn_lock);
 #endif
 start_region_id = region_id;
@@ -284,7 +284,7 @@ static void ccn_snoop_dvm_do_op(unsigned long long rn_id_map,
 rn_id_map);
 }
-#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
 bakery_lock_release(&ccn_lock);
 #endif
 }
...
@@ -73,12 +73,12 @@ void __init gicv3_driver_init(const gicv3_driver_data_t *plat_driver_data)
 plat_driver_data->interrupt_props != NULL : 1);
 /* Check for system register support */
-#ifdef AARCH32
-assert((read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT)) != 0U);
-#else
+#ifdef __aarch64__
 assert((read_id_aa64pfr0_el1() &
 (ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT)) != 0U);
-#endif /* AARCH32 */
+#else
+assert((read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT)) != 0U);
+#endif /* __aarch64__ */
 /* The GIC version should be 3.0 */
 gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
...
@@ -40,16 +40,16 @@ static inline u_register_t gicd_irouter_val_from_mpidr(u_register_t mpidr,
 * Macro to convert a GICR_TYPER affinity value into a MPIDR value. Bits[31:24]
 * are zeroes.
 */
-#ifdef AARCH32
+#ifdef __aarch64__
 static inline u_register_t mpidr_from_gicr_typer(uint64_t typer_val)
 {
-return (((typer_val) >> 32) & U(0xffffff));
+return (((typer_val >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) |
+((typer_val >> 32) & U(0xffffff));
 }
 #else
 static inline u_register_t mpidr_from_gicr_typer(uint64_t typer_val)
 {
-return (((typer_val >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) |
-((typer_val >> 32) & U(0xffffff));
+return (((typer_val) >> 32) & U(0xffffff));
 }
 #endif
...
@@ -41,7 +41,7 @@ CASSERT(ENTRY_POINT_INFO_PC_OFFSET ==
 __builtin_offsetof(entry_point_info_t, pc), \
 assert_BL31_pc_offset_mismatch);
-#ifdef AARCH32
+#ifndef __aarch64__
 CASSERT(ENTRY_POINT_INFO_LR_SVC_OFFSET ==
 __builtin_offsetof(entry_point_info_t, lr_svc),
 assert_entrypoint_lr_offset_error);
...
@@ -20,15 +20,15 @@
 * Constants to allow the assembler access a runtime service
 * descriptor
 */
-#ifdef AARCH32
-#define RT_SVC_SIZE_LOG2 U(4)
-#define RT_SVC_DESC_INIT U(8)
-#define RT_SVC_DESC_HANDLE U(12)
-#else
+#ifdef __aarch64__
 #define RT_SVC_SIZE_LOG2 U(5)
 #define RT_SVC_DESC_INIT U(16)
 #define RT_SVC_DESC_HANDLE U(24)
-#endif /* AARCH32 */
+#else
+#define RT_SVC_SIZE_LOG2 U(4)
+#define RT_SVC_DESC_INIT U(8)
+#define RT_SVC_DESC_HANDLE U(12)
+#endif /* __aarch64__ */
 #define SIZEOF_RT_SVC_DESC (U(1) << RT_SVC_SIZE_LOG2)
...
@@ -35,7 +35,7 @@ void cm_init_context_by_index(unsigned int cpu_idx,
 void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep);
 void cm_prepare_el3_exit(uint32_t security_state);
-#ifndef AARCH32
+#ifdef __aarch64__
 void cm_el1_sysregs_context_save(uint32_t security_state);
 void cm_el1_sysregs_context_restore(uint32_t security_state);
 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
@@ -78,6 +78,6 @@ static inline void cm_set_next_context(void *context)
 #else
 void *cm_get_next_context(void);
 void cm_set_next_context(void *context);
-#endif /* AARCH32 */
+#endif /* __aarch64__ */
 #endif /* CONTEXT_MGMT_H */
@@ -11,15 +11,7 @@
 #include <bl31/ehf.h>
-#ifdef AARCH32
-#if CRASH_REPORTING
-#error "Crash reporting is not supported in AArch32"
-#endif
-#define CPU_DATA_CPU_OPS_PTR 0x0
-#define CPU_DATA_CRASH_BUF_OFFSET 0x4
-#else /* AARCH32 */
+#ifdef __aarch64__
 /* Offsets for the cpu_data structure */
 #define CPU_DATA_CRASH_BUF_OFFSET 0x18
@@ -27,7 +19,15 @@
 #define CPU_DATA_CRASH_BUF_SIZE 64
 #define CPU_DATA_CPU_OPS_PTR 0x10
-#endif /* AARCH32 */
+#else /* __aarch64__ */
+#if CRASH_REPORTING
+#error "Crash reporting is not supported in AArch32"
+#endif
+#define CPU_DATA_CPU_OPS_PTR 0x0
+#define CPU_DATA_CRASH_BUF_OFFSET 0x4
+#endif /* __aarch64__ */
 #if CRASH_REPORTING
 #define CPU_DATA_CRASH_BUF_END (CPU_DATA_CRASH_BUF_OFFSET + \
@@ -84,7 +84,7 @@
 * used for this.
 ******************************************************************************/
 typedef struct cpu_data {
-#ifndef AARCH32
+#ifdef __aarch64__
 void *cpu_context[2];
 #endif
 uintptr_t cpu_ops_ptr;
@@ -127,7 +127,7 @@ CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
 struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
-#ifndef AARCH32
+#ifdef __aarch64__
 /* Return the cpu_data structure for the current CPU. */
 static inline struct cpu_data *_cpu_data(void)
 {
...
@@ -24,7 +24,7 @@ REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
 REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
 REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);
-#ifdef AARCH64
+#ifdef __aarch64__
 /*
 * These events are published by the AArch64 context management framework
 * after the secure context is restored/saved via
@@ -40,4 +40,4 @@ REGISTER_PUBSUB_EVENT(cm_exited_secure_world);
 */
 REGISTER_PUBSUB_EVENT(cm_entering_normal_world);
 REGISTER_PUBSUB_EVENT(cm_exited_normal_world);
-#endif /* AARCH64 */
+#endif /* __aarch64__ */
@@ -21,10 +21,10 @@
 #define BIT_32(nr) (U(1) << (nr))
 #define BIT_64(nr) (ULL(1) << (nr))
-#ifdef AARCH32
-#define BIT BIT_32
-#else
+#ifdef __aarch64__
 #define BIT BIT_64
+#else
+#define BIT BIT_32
 #endif
 /*
@@ -46,10 +46,10 @@
 (((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
 #endif
-#ifdef AARCH32
-#define GENMASK GENMASK_32
-#else
+#ifdef __aarch64__
 #define GENMASK GENMASK_64
+#else
+#define GENMASK GENMASK_32
 #endif
 /*
@@ -109,10 +109,10 @@
 ((_u32) > (UINT32_MAX - (_inc)))
 /* Register size of the current architecture. */
-#ifdef AARCH32
-#define REGSZ U(4)
-#else
+#ifdef __aarch64__
 #define REGSZ U(8)
+#else
+#define REGSZ U(4)
 #endif
 /*
...
@@ -65,14 +65,7 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 const uint64_t *base_table, unsigned long long max_pa,
 uintptr_t max_va, int xlat_regime);
-#ifdef AARCH32
-/* AArch32 specific translation table API */
-void enable_mmu_svc_mon(unsigned int flags);
-void enable_mmu_hyp(unsigned int flags);
-void enable_mmu_direct_svc_mon(unsigned int flags);
-void enable_mmu_direct_hyp(unsigned int flags);
-#else
+#ifdef __aarch64__
 /* AArch64 specific translation table APIs */
 void enable_mmu_el1(unsigned int flags);
 void enable_mmu_el2(unsigned int flags);
@@ -81,7 +74,14 @@ void enable_mmu_el3(unsigned int flags);
 void enable_mmu_direct_el1(unsigned int flags);
 void enable_mmu_direct_el2(unsigned int flags);
 void enable_mmu_direct_el3(unsigned int flags);
-#endif /* AARCH32 */
+#else
+/* AArch32 specific translation table API */
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
+#endif /* __aarch64__ */
 bool xlat_arch_is_granule_size_supported(size_t size);
 size_t xlat_arch_get_max_supported_granule_size(void);
...
@@ -7,10 +7,10 @@
 #ifndef XLAT_TABLES_ARCH_H
 #define XLAT_TABLES_ARCH_H
-#ifdef AARCH32
-#include "aarch32/xlat_tables_aarch32.h"
-#else
+#ifdef __aarch64__
 #include "aarch64/xlat_tables_aarch64.h"
+#else
+#include "aarch32/xlat_tables_aarch32.h"
 #endif
 /*
...
@@ -62,7 +62,7 @@
 #define OSH (U(0x2) << 6)
 #define ISH (U(0x3) << 6)
-#ifdef AARCH64
+#ifdef __aarch64__
 /* Guarded Page bit */
 #define GP (ULL(1) << 50)
 #endif
...
@@ -434,7 +434,7 @@
 #endif
 #endif
-#if defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME
+#if !defined(__aarch64__) || JUNO_AARCH32_EL3_RUNTIME
 /*******************************************************************************
 * BL32 specific defines for EL3 runtime in AArch32 mode
 ******************************************************************************/
@@ -498,17 +498,17 @@
 # else
 # error "Unsupported ARM_TSP_RAM_LOCATION_ID value"
 # endif
-#endif /* AARCH32 || JUNO_AARCH32_EL3_RUNTIME */
+#endif /* !__aarch64__ || JUNO_AARCH32_EL3_RUNTIME */
 /*
 * BL32 is mandatory in AArch32. In AArch64, undefine BL32_BASE if there is no
 * SPD and no SPM, as they are the only ones that can be used as BL32.
 */
-#if !(defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME)
+#if defined(__aarch64__) && !JUNO_AARCH32_EL3_RUNTIME
 # if defined(SPD_none) && !ENABLE_SPM
 # undef BL32_BASE
 # endif /* defined(SPD_none) && !ENABLE_SPM */
-#endif /* !(defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME) */
+#endif /* defined(__aarch64__) && !JUNO_AARCH32_EL3_RUNTIME */
 /*******************************************************************************
 * FWU Images: NS_BL1U, BL2U & NS_BL2U defines.
...