Commit a3b16996 authored by Alexei Fedorov, committed by TrustedFirmware Code Review

Merge "Switch AARCH32/AARCH64 to __aarch64__" into integration

parents 01c44ddd 402b3cf8
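
Reviewer summary: every hunk below applies the same substitution. The build-system macros AARCH32/AARCH64 (injected with -D flags) are replaced by __aarch64__, which GCC and Clang predefine for any AArch64 target, so the guards no longer depend on the build system. Positive AARCH64 tests become #ifdef __aarch64__, and AARCH32 tests flip polarity to #ifndef __aarch64__ or !defined(__aarch64__). A minimal standalone sketch of the idiom (not from the tree):

#include <stdio.h>

int main(void)
{
#ifdef __aarch64__
	/* Predefined by the compiler for AArch64 targets; no -D flag needed. */
	printf("AArch64 build\n");
#else
	printf("AArch32 (or other) build\n");
#endif
	return 0;
}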
@@ -69,7 +69,7 @@ typedef struct arm_tzc_regions_info {
 void arm_setup_romlib(void);
-#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
+#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
 /*
  * Use this macro to instantiate lock before it is used in below
  * arm_lock_xxx() macros
  */
@@ -102,7 +102,7 @@ void arm_setup_romlib(void);
 #define arm_lock_get()
 #define arm_lock_release()
-#endif /* defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32)) */
+#endif /* defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32)) */
 #if ARM_RECOM_STATE_ID_ENC
 /*
......
@@ -20,13 +20,13 @@
 /*
  * Platform binary types for linking
  */
-#ifdef AARCH32
-#define PLATFORM_LINKER_FORMAT		"elf32-littlearm"
-#define PLATFORM_LINKER_ARCH		arm
-#else
+#ifdef __aarch64__
 #define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
 #define PLATFORM_LINKER_ARCH		aarch64
-#endif /* AARCH32 */
+#else
+#define PLATFORM_LINKER_FORMAT		"elf32-littlearm"
+#define PLATFORM_LINKER_ARCH		arm
+#endif /* __aarch64__ */

 /*
  * Generic platform constants
......
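
The PLATFORM_LINKER_* macros above are consumed by TF-A's preprocessed linker scripts via OUTPUT_FORMAT()/OUTPUT_ARCH(). A self-contained way to check which pair a toolchain selects; XSTR/STR and main() are illustrative helpers, not part of the tree:

#include <stdio.h>

#define STR(x)	#x
#define XSTR(x)	STR(x)		/* expand, then stringize the bare token */

#ifdef __aarch64__
#define PLATFORM_LINKER_FORMAT	"elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH	aarch64
#else
#define PLATFORM_LINKER_FORMAT	"elf32-littlearm"
#define PLATFORM_LINKER_ARCH	arm
#endif

int main(void)
{
	printf("format=%s arch=%s\n",
	       PLATFORM_LINKER_FORMAT, XSTR(PLATFORM_LINKER_ARCH));
	return 0;
}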
@@ -18,9 +18,9 @@
 #ifdef IMAGE_BL1
 # define BL_STRING	"BL1"
-#elif defined(AARCH64) && defined(IMAGE_BL31)
+#elif defined(__aarch64__) && defined(IMAGE_BL31)
 # define BL_STRING	"BL31"
-#elif defined(AARCH32) && defined(IMAGE_BL32)
+#elif !defined(__aarch64__) && defined(IMAGE_BL32)
 # define BL_STRING	"BL32"
 #elif defined(IMAGE_BL2) && BL2_AT_EL3
 # define BL_STRING	"BL2"
......
@@ -167,10 +167,10 @@ void bakery_lock_get(bakery_lock_t *lock)
 	unsigned int their_bakery_data;

 	me = plat_my_core_pos();
-#ifdef AARCH32
-	is_cached = read_sctlr() & SCTLR_C_BIT;
-#else
+#ifdef __aarch64__
 	is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#else
+	is_cached = read_sctlr() & SCTLR_C_BIT;
 #endif

 	/* Get a ticket */
@@ -228,10 +228,10 @@ void bakery_lock_get(bakery_lock_t *lock)
 void bakery_lock_release(bakery_lock_t *lock)
 {
 	bakery_info_t *my_bakery_info;
-#ifdef AARCH32
-	unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
-#else
+#ifdef __aarch64__
 	unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#else
+	unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
 #endif

 	my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
......
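
In both bakery functions the guard flip is mechanical: the lock code only needs to know whether the data cache is enabled (cached lock state needs explicit maintenance so contenders see each other's tickets), and that bit lives in SCTLR_EL3 on AArch64 but in the 32-bit SCTLR on AArch32. A hypothetical helper that would centralise the repeated guard; it assumes TF-A's accessor names and header layout and is not part of this change:

#include <stdbool.h>
#include <arch.h>		/* SCTLR_C_BIT (assumed header name) */
#include <arch_helpers.h>	/* read_sctlr(), read_sctlr_el3() */

static inline bool bakery_dcache_enabled(void)
{
#ifdef __aarch64__
	return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
#else
	return (read_sctlr() & SCTLR_C_BIT) != 0U;
#endif
}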
@@ -176,7 +176,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
 	 */
 	if (!tee_validate_header(header)) {
 		INFO("Invalid OPTEE header, set legacy mode.\n");
-#ifdef AARCH64
+#ifdef __aarch64__
 		header_ep->args.arg0 = MODE_RW_64;
 #else
 		header_ep->args.arg0 = MODE_RW_32;
@@ -222,7 +222,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
 	if (header->arch == 0) {
 		header_ep->args.arg0 = MODE_RW_32;
 	} else {
-#ifdef AARCH64
+#ifdef __aarch64__
 		header_ep->args.arg0 = MODE_RW_64;
 #else
 		ERROR("Cannot boot an AArch64 OP-TEE\n");
......
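
The OP-TEE hunks encode an asymmetry worth noting: an AArch64 TF-A can hand off to either a 32-bit or 64-bit OP-TEE (header->arch selects), while an AArch32 TF-A can only ever boot a 32-bit one, hence the ERROR() path in the #else branch. A standalone distillation of that branch logic; the function is hypothetical and the MODE_RW_* values are as I recall them from TF-A's ep_info header:

#include <stdint.h>

#define MODE_RW_64	0x0U	/* assumed values, see ep_info.h */
#define MODE_RW_32	0x1U

static int select_optee_rw(uint32_t header_arch, uint64_t *arg0)
{
#ifdef __aarch64__
	*arg0 = (header_arch == 0U) ? MODE_RW_32 : MODE_RW_64;
	return 0;
#else
	if (header_arch != 0U)
		return -1;	/* AArch32 TF-A cannot boot an AArch64 OP-TEE */
	*arg0 = MODE_RW_32;
	return 0;
#endif
}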
@@ -619,53 +619,7 @@ int psci_validate_mpidr(u_register_t mpidr)
  * This function determines the full entrypoint information for the requested
  * PSCI entrypoint on power on/resume and returns it.
  ******************************************************************************/
-#ifdef AARCH32
-static int psci_get_ns_ep_info(entry_point_info_t *ep,
-			       uintptr_t entrypoint,
-			       u_register_t context_id)
-{
-	u_register_t ep_attr;
-	unsigned int aif, ee, mode;
-	u_register_t scr = read_scr();
-	u_register_t ns_sctlr, sctlr;
-
-	/* Switch to non secure state */
-	write_scr(scr | SCR_NS_BIT);
-	isb();
-	ns_sctlr = read_sctlr();
-
-	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
-
-	/* Return to original state */
-	write_scr(scr);
-	isb();
-	ee = 0;
-
-	ep_attr = NON_SECURE | EP_ST_DISABLE;
-	if (sctlr & SCTLR_EE_BIT) {
-		ep_attr |= EP_EE_BIG;
-		ee = 1;
-	}
-	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
-
-	ep->pc = entrypoint;
-	zeromem(&ep->args, sizeof(ep->args));
-	ep->args.arg0 = context_id;
-
-	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
-
-	/*
-	 * TODO: Choose async. exception bits if HYP mode is not
-	 * implemented according to the values of SCR.{AW, FW} bits
-	 */
-	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
-
-	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
-
-	return PSCI_E_SUCCESS;
-}
-#else
+#ifdef __aarch64__
 static int psci_get_ns_ep_info(entry_point_info_t *ep,
 			       uintptr_t entrypoint,
 			       u_register_t context_id)
@@ -722,7 +676,53 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,

 	return PSCI_E_SUCCESS;
 }
-#endif
+#else /* !__aarch64__ */
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+			       uintptr_t entrypoint,
+			       u_register_t context_id)
+{
+	u_register_t ep_attr;
+	unsigned int aif, ee, mode;
+	u_register_t scr = read_scr();
+	u_register_t ns_sctlr, sctlr;
+
+	/* Switch to non secure state */
+	write_scr(scr | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_sctlr();
+
+	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
+
+	/* Return to original state */
+	write_scr(scr);
+	isb();
+	ee = 0;
+
+	ep_attr = NON_SECURE | EP_ST_DISABLE;
+	if (sctlr & SCTLR_EE_BIT) {
+		ep_attr |= EP_EE_BIG;
+		ee = 1;
+	}
+	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+	ep->pc = entrypoint;
+	zeromem(&ep->args, sizeof(ep->args));
+	ep->args.arg0 = context_id;
+
+	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+	/*
+	 * TODO: Choose async. exception bits if HYP mode is not
+	 * implemented according to the values of SCR.{AW, FW} bits
+	 */
+	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
+
+	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
+
+	return PSCI_E_SUCCESS;
+}
+#endif /* __aarch64__ */

 /*******************************************************************************
  * This function validates the entrypoint with the platform layer if the
......
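
Both psci_get_ns_ep_info() bodies move verbatim; only the guard and branch order change (AArch64 first, AArch32 under #else). For readers of the AArch32 branch, the final SPSR_MODE32() call packs the mode, the Thumb bit taken from entrypoint bit 0, the endianness from SCTLR.EE, and the masked asynchronous exceptions. A rough standalone model of that packing, using the architectural CPSR bit positions (mode[4:0], T=5, F/I/A=6/7/8, E=9); TF-A's real macro may factor this differently:

#include <stdint.h>

static uint32_t make_spsr32(uint32_t mode, uint32_t thumb, uint32_t big_endian)
{
	uint32_t spsr = mode & 0x1fU;		   /* MODE32_hyp or MODE32_svc  */

	spsr |= thumb << 5;			   /* entrypoint bit 0 -> CPSR.T */
	spsr |= (1U << 6) | (1U << 7) | (1U << 8); /* mask F, I, A              */
	spsr |= big_endian << 9;		   /* SCTLR.EE -> CPSR.E        */
	return spsr;
}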
@@ -136,48 +136,48 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
 #define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
 #endif

-#ifdef AARCH32
+#ifdef __aarch64__

-void enable_mmu_svc_mon(unsigned int flags)
+void enable_mmu_el1(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
 		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
-	enable_mmu_direct_svc_mon(flags);
+	enable_mmu_direct_el1(flags);
 }

-void enable_mmu_hyp(unsigned int flags)
+void enable_mmu_el2(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
 		      tf_xlat_ctx.va_max_address, EL2_REGIME);
-	enable_mmu_direct_hyp(flags);
+	enable_mmu_direct_el2(flags);
 }

-#else
-
-void enable_mmu_el1(unsigned int flags)
+void enable_mmu_el3(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
-	enable_mmu_direct_el1(flags);
+		      tf_xlat_ctx.va_max_address, EL3_REGIME);
+	enable_mmu_direct_el3(flags);
 }

-void enable_mmu_el2(unsigned int flags)
+#else /* !__aarch64__ */
+
+void enable_mmu_svc_mon(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL2_REGIME);
-	enable_mmu_direct_el2(flags);
+		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+	enable_mmu_direct_svc_mon(flags);
 }

-void enable_mmu_el3(unsigned int flags)
+void enable_mmu_hyp(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL3_REGIME);
-	enable_mmu_direct_el3(flags);
+		      tf_xlat_ctx.va_max_address, EL2_REGIME);
+	enable_mmu_direct_hyp(flags);
 }

-#endif /* AARCH32 */
+#endif /* __aarch64__ */
@@ -97,7 +97,7 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)

 	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");

-#ifdef AARCH64
+#ifdef __aarch64__
 	/* Check Guarded Page bit */
 	if ((desc & GP) != 0ULL) {
 		printf("-GP");
......
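
The xlat_tables hunk is again a pure reordering: the EL1/EL2/EL3 variants now sit in the #ifdef arm and the svc_mon/hyp variants in the #else, reflecting that exception-level names only exist on AArch64 while AArch32 images run in named processor modes. A hypothetical caller showing how an image picks its entry point (plat_enable_mmu() is invented for illustration; the enable_mmu_* declarations come from the xlat_tables_v2 header):

#include <lib/xlat_tables/xlat_tables_v2.h>	/* assumed include path */

static void plat_enable_mmu(unsigned int flags)
{
#ifdef __aarch64__
	enable_mmu_el3(flags);		/* e.g. a BL31 image running at EL3 */
#else
	enable_mmu_svc_mon(flags);	/* e.g. SP_MIN in secure SVC/Monitor */
#endif
}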
@@ -273,7 +273,7 @@ $(eval IMAGE := IMAGE_BL$(call uppercase,$(3)))
 $(1): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs
 	$$(ECHO) "  PP      $$<"
-	$$(Q)$$(CPP) $$(CPPFLAGS) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -D$(IMAGE) -o $$@ $$<
+	$$(Q)$$(CPP) $$(CPPFLAGS) $(TF_CFLAGS_$(ARCH)) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -D$(IMAGE) -o $$@ $$<

 -include $(DEP)
......
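
This one-line Makefile change is what keeps the rename safe for linker scripts: they are run through $(CPP) before linking, and their guards now test a compiler predefine instead of a -D flag, so the preprocessor must be invoked with the target flags (TF_CFLAGS_$(ARCH)) or __aarch64__ is never defined and every script silently takes the 32-bit branch. A throwaway fragment to demonstrate; the toolchain names are examples:

/* demo.ld.S -- preprocess two ways and compare:
 *   aarch64-none-elf-gcc -E -P -x assembler-with-cpp demo.ld.S
 *   gcc -E -P -x assembler-with-cpp demo.ld.S        (host compiler)
 */
#ifdef __aarch64__
OUTPUT_FORMAT(elf64-littleaarch64)
#else
OUTPUT_FORMAT(elf32-littlearm)
#endif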
@@ -81,7 +81,7 @@ const mmap_region_t plat_arm_mmap[] = {
 	MAP_DEVICE0,
 	MAP_DEVICE1,
 	ARM_MAP_NS_DRAM1,
-#ifdef AARCH64
+#ifdef __aarch64__
 	ARM_MAP_DRAM2,
 #endif
 #ifdef SPD_tspd
@@ -150,7 +150,7 @@ const mmap_region_t plat_arm_secure_partition_mmap[] = {
 #endif
 #ifdef IMAGE_BL32
 const mmap_region_t plat_arm_mmap[] = {
-#ifdef AARCH32
+#ifndef __aarch64__
 	ARM_MAP_SHARED_RAM,
 	ARM_V2M_MAP_MEM_PROTECT,
 #endif
......
@@ -120,7 +120,7 @@
 #define PLAT_ARM_MAX_BL31_SIZE		UL(0x3B000)
 #endif

-#ifdef AARCH32
+#ifndef __aarch64__
 /*
  * Since BL32 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL32_SIZE is
  * calculated using the current SP_MIN PROGBITS debug size plus the sizes of
@@ -259,7 +259,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifdef AARCH64
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -331,7 +331,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifdef AARCH64
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -291,7 +291,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifdef AARCH64
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -37,7 +37,7 @@ const mmap_region_t plat_arm_mmap[] = {
 	CSS_MAP_DEVICE,
 	SOC_CSS_MAP_DEVICE,
 	ARM_MAP_NS_DRAM1,
-#ifdef AARCH64
+#ifdef __aarch64__
 	ARM_MAP_DRAM2,
 #endif
 #ifdef SPD_tspd
@@ -74,7 +74,7 @@ const mmap_region_t plat_arm_mmap[] = {
 #endif
 #ifdef IMAGE_BL32
 const mmap_region_t plat_arm_mmap[] = {
-#ifdef AARCH32
+#ifndef __aarch64__
 	ARM_MAP_SHARED_RAM,
 #ifdef PLAT_ARM_MEM_PROT_ADDR
 	ARM_V2M_MAP_MEM_PROTECT,
......
@@ -34,7 +34,7 @@
  * space the physical & virtual address space limits are extended to
  * 40-bits.
  */
-#ifndef AARCH32
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 40)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 40)
 #else
......
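
For the platform_def.h hunks, the constants deserve a quick sanity check: (1ULL << 36) is 64 GiB, and the 40-bit case above is 1 TiB for the platform that maps a larger physical space. A trivial standalone check:

#include <stdio.h>

int main(void)
{
	printf("1ULL << 36 = %llu GiB\n", (1ULL << 36) >> 30);	/* 64   */
	printf("1ULL << 40 = %llu GiB\n", (1ULL << 40) >> 30);	/* 1024 */
	return 0;
}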
@@ -29,7 +29,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifndef AARCH32
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -30,7 +30,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifndef AARCH32
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -30,7 +30,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifndef AARCH32
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -15,7 +15,7 @@
 /*
  * Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
  */
-#ifndef AARCH32
+#ifdef __aarch64__
 #define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 36)
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 36)
 #else
......
@@ -121,11 +121,11 @@ void arm_bl1_plat_arch_setup(void)
 	};

 	setup_page_tables(bl_regions, plat_arm_get_mmap());
-#ifdef AARCH32
-	enable_mmu_svc_mon(0);
-#else
+#ifdef __aarch64__
 	enable_mmu_el3(0);
-#endif /* AARCH32 */
+#else
+	enable_mmu_svc_mon(0);
+#endif /* __aarch64__ */

 	arm_setup_romlib();
 }
......