Unverified Commit 0983b8b1 authored by Dimitris Papastamos, committed by GitHub

Merge pull request #1519 from antonio-nino-diaz-arm/an/xlat-el2

xlat v2: Support EL2 translation regime
parents 6902e66a 4c72c3fe
......@@ -116,7 +116,7 @@ func smc_handler
/* Turn on the MMU */
mov r0, #DISABLE_DCACHE
bl enable_mmu_secure
bl enable_mmu_svc_mon
/* Enable the data cache. */
ldcopr r9, SCTLR
......
......@@ -313,6 +313,28 @@
#define TTBCR_T0SZ_SHIFT U(0)
#define TTBCR_T0SZ_MASK U(0x7)
/*
* HTCR definitions
*/
#define HTCR_RES1 ((U(1) << 31) | (U(1) << 23))
#define HTCR_SH0_NON_SHAREABLE (U(0x0) << 12)
#define HTCR_SH0_OUTER_SHAREABLE (U(0x2) << 12)
#define HTCR_SH0_INNER_SHAREABLE (U(0x3) << 12)
#define HTCR_RGN0_OUTER_NC (U(0x0) << 10)
#define HTCR_RGN0_OUTER_WBA (U(0x1) << 10)
#define HTCR_RGN0_OUTER_WT (U(0x2) << 10)
#define HTCR_RGN0_OUTER_WBNA (U(0x3) << 10)
#define HTCR_RGN0_INNER_NC (U(0x0) << 8)
#define HTCR_RGN0_INNER_WBA (U(0x1) << 8)
#define HTCR_RGN0_INNER_WT (U(0x2) << 8)
#define HTCR_RGN0_INNER_WBNA (U(0x3) << 8)
#define HTCR_T0SZ_SHIFT U(0)
#define HTCR_T0SZ_MASK U(0x7)
#define MODE_RW_SHIFT U(0x4)
#define MODE_RW_MASK U(0x1)
#define MODE_RW_32 U(0x1)
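These HTCR fields mirror the TTBCR long-descriptor layout, but for the stage 1 translation regime of hyp mode (PL2). A minimal sketch of how they compose into a control-register value, using only the defines above (the T0SZ value of 0, giving a full 32-bit input range, is an assumption; the library derives it from the configured virtual address space):

        /* Hedged sketch: inner-shareable, inner/outer write-back
         * write-allocate table walks, 2^32-byte input range (T0SZ = 0). */
        uint32_t htcr = HTCR_RES1 |
                        HTCR_SH0_INNER_SHAREABLE |
                        HTCR_RGN0_OUTER_WBA |
                        HTCR_RGN0_INNER_WBA |
                        (U(0) << HTCR_T0SZ_SHIFT);

This matches the value setup_mmu_cfg() assembles for the EL2 regime further down in this patch.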
......@@ -433,6 +455,7 @@
#define TLBIMVA p15, 0, c8, c7, 1
#define TLBIMVAA p15, 0, c8, c7, 3
#define TLBIMVAAIS p15, 0, c8, c3, 3
#define TLBIMVAHIS p15, 4, c8, c3, 1
#define BPIALLIS p15, 0, c7, c1, 6
#define BPIALL p15, 0, c7, c5, 6
#define ICIALLU p15, 0, c7, c5, 0
......@@ -448,6 +471,8 @@
#define CLIDR p15, 1, c0, c0, 1
#define CSSELR p15, 2, c0, c0, 0
#define CCSIDR p15, 1, c0, c0, 0
#define HTCR p15, 4, c2, c0, 2
#define HMAIR0 p15, 4, c10, c2, 0
#define DBGOSDLR p14, 0, c1, c3, 4
/* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
......@@ -487,6 +512,7 @@
#define CNTVOFF_64 p15, 4, c14
#define VTTBR_64 p15, 6, c2
#define CNTPCT_64 p15, 0, c14
#define HTTBR_64 p15, 4, c2
/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */
#define ICC_SGI1R_EL1_64 p15, 0, c12
......
/*
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -300,6 +300,7 @@ DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)
/*
* BPI operation prototypes.
......@@ -320,6 +321,10 @@ DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
#define IS_IN_SECURE() \
(GET_NS_BIT(read_scr()) == 0)
#define IS_IN_HYP() (GET_M32(read_cpsr()) == MODE32_hyp)
#define IS_IN_SVC() (GET_M32(read_cpsr()) == MODE32_svc)
#define IS_IN_MON() (GET_M32(read_cpsr()) == MODE32_mon)
#define IS_IN_EL2() IS_IN_HYP()
/*
* If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3
*/
......
......@@ -364,7 +364,9 @@
* TCR definitions
*/
#define TCR_EL3_RES1 ((U(1) << 31) | (U(1) << 23))
#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
#define TCR_EL1_IPS_SHIFT U(32)
#define TCR_EL2_PS_SHIFT U(16)
#define TCR_EL3_PS_SHIFT U(16)
#define TCR_TxSZ_MIN ULL(16)
......
......@@ -67,15 +67,24 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
#ifdef AARCH32
/* AArch32 specific translation table API */
#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags);
void enable_mmu_direct(unsigned int flags);
#endif
void enable_mmu_svc_mon(unsigned int flags);
void enable_mmu_hyp(unsigned int flags);
void enable_mmu_direct_svc_mon(unsigned int flags);
void enable_mmu_direct_hyp(unsigned int flags);
#else
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el2(unsigned int flags);
void enable_mmu_el3(unsigned int flags);
void enable_mmu_direct_el1(unsigned int flags);
void enable_mmu_direct_el2(unsigned int flags);
void enable_mmu_direct_el3(unsigned int flags);
#endif /* AARCH32 */
......
......@@ -125,6 +125,7 @@ typedef struct mmap_region {
* library to detect it at runtime.
*/
#define EL1_EL0_REGIME 1
#define EL2_REGIME 2
#define EL3_REGIME 3
#define EL_REGIME_INVALID -1
......
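A translation context carries one of these regime identifiers so the library can select the right execute-never bits, TLB maintenance operations and control registers at runtime. A hedged sketch of that selection, mirroring what init_xlat_tables() does later in this patch (the helper name is hypothetical):

        /* Hypothetical helper: map the current exception level to a regime. */
        static int regime_for_el(unsigned int el)
        {
                if (el == 1U)
                        return EL1_EL0_REGIME;
                if (el == 2U)
                        return EL2_REGIME;
                return EL3_REGIME;      /* el == 3U */
        }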
......@@ -65,7 +65,19 @@ void init_xlat_tables(void)
* Function for enabling the MMU in Secure PL1, assuming that the
* page-tables have already been created.
******************************************************************************/
#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
enable_mmu_svc_mon(flags);
}
void enable_mmu_direct(unsigned int flags)
{
enable_mmu_direct_svc_mon(flags);
}
#endif
void enable_mmu_svc_mon(unsigned int flags)
{
unsigned int mair0, ttbcr, sctlr;
uint64_t ttbr0;
......@@ -131,7 +143,7 @@ void enable_mmu_secure(unsigned int flags)
isb();
}
void enable_mmu_direct(unsigned int flags)
void enable_mmu_direct_svc_mon(unsigned int flags)
{
enable_mmu_secure(flags);
enable_mmu_svc_mon(flags);
}
......@@ -8,9 +8,11 @@
#include <assert_macros.S>
#include <xlat_tables_v2.h>
.global enable_mmu_direct
.global enable_mmu_direct_svc_mon
.global enable_mmu_direct_hyp
func enable_mmu_direct
/* void enable_mmu_direct_svc_mon(unsigned int flags) */
func enable_mmu_direct_svc_mon
/* Assert that MMU is turned off */
#if ENABLE_ASSERTIONS
ldcopr r1, SCTLR
......@@ -63,4 +65,56 @@ func enable_mmu_direct
isb
bx lr
endfunc enable_mmu_direct
endfunc enable_mmu_direct_svc_mon
/* void enable_mmu_direct_hyp(unsigned int flags) */
func enable_mmu_direct_hyp
/* Assert that MMU is turned off */
#if ENABLE_ASSERTIONS
ldcopr r1, HSCTLR
tst r1, #HSCTLR_M_BIT
ASM_ASSERT(eq)
#endif
/* Invalidate TLB entries */
TLB_INVALIDATE(r0, TLBIALL)
mov r3, r0
ldr r0, =mmu_cfg_params
/* HMAIR0 */
ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
stcopr r1, HMAIR0
/* HTCR */
ldr r2, [r0, #(MMU_CFG_TCR << 3)]
stcopr r2, HTCR
/* HTTBR */
ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
stcopr16 r1, r2, HTTBR_64
/*
* Ensure all translation table writes have drained into memory, the TLB
* invalidation is complete, and translation register writes are
* committed before enabling the MMU
*/
dsb ish
isb
/* Enable the MMU, honoring the flags */
ldcopr r1, HSCTLR
ldr r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT)
orr r1, r1, r2
/* Clear C bit if requested */
tst r3, #DISABLE_DCACHE
bicne r1, r1, #HSCTLR_C_BIT
stcopr r1, HSCTLR
isb
bx lr
endfunc enable_mmu_direct_hyp
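For reference, this is roughly the sequence enable_mmu_direct_hyp implements, expressed as a hedged C sketch. The accessor names (write_hmair0, write_htcr, write64_httbr, write_hsctlr) are assumptions for readability; the real routine is assembly and programs the registers directly with stcopr/stcopr16:

        /* C sketch only; accessor names are assumed, not taken from the patch. */
        static void enable_mmu_hyp_sketch(unsigned int flags)
        {
                tlbiall();                      /* invalidate TLB entries */

                write_hmair0((uint32_t)mmu_cfg_params[MMU_CFG_MAIR]);
                write_htcr((uint32_t)mmu_cfg_params[MMU_CFG_TCR]);
                write64_httbr(mmu_cfg_params[MMU_CFG_TTBR0]);

                dsbish();                       /* drain table writes and TLBI */
                isb();

                uint32_t sctlr = read_hsctlr() |
                                 HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT;
                if ((flags & DISABLE_DCACHE) != 0U)
                        sctlr &= ~HSCTLR_C_BIT; /* honor DISABLE_DCACHE */
                write_hsctlr(sctlr);
                isb();
        }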
......@@ -43,22 +43,38 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS*/
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
return (read_sctlr() & SCTLR_M_BIT) != 0;
if (ctx->xlat_regime == EL1_EL0_REGIME) {
assert(xlat_arch_current_el() == 1U);
return (read_sctlr() & SCTLR_M_BIT) != 0U;
} else {
assert(ctx->xlat_regime == EL2_REGIME);
assert(xlat_arch_current_el() == 2U);
return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
}
}
bool is_dcache_enabled(void)
{
return (read_sctlr() & SCTLR_C_BIT) != 0;
if (IS_IN_EL2()) {
return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
} else {
return (read_sctlr() & SCTLR_C_BIT) != 0U;
}
}
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
if (xlat_regime == EL1_EL0_REGIME) {
return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
} else {
assert(xlat_regime == EL2_REGIME);
return UPPER_ATTRS(XN);
}
}
void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
......@@ -66,7 +82,12 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
*/
dsbishst();
if (xlat_regime == EL1_EL0_REGIME) {
tlbimvaais(TLBI_ADDR(va));
} else {
assert(xlat_regime == EL2_REGIME);
tlbimvahis(TLBI_ADDR(va));
}
}
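When a hyp-regime mapping changes, the stale entry must be invalidated and the invalidation completed before the new attributes can be relied upon; xlat_change_mem_attributes() performs exactly this pairing internally. A hedged sketch using the helpers defined in this file (they are private to the xlat library, so the wrapper below is hypothetical):

        static void flush_hyp_va(uintptr_t va)
        {
                xlat_arch_tlbi_va(va, EL2_REGIME);      /* TLBIMVAHIS by VA */
                xlat_arch_tlbi_va_sync();               /* wait for completion */
        }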
void xlat_arch_tlbi_va_sync(void)
......@@ -97,19 +118,25 @@ void xlat_arch_tlbi_va_sync(void)
unsigned int xlat_arch_current_el(void)
{
if (IS_IN_HYP()) {
return 2U;
} else {
assert(IS_IN_SVC() || IS_IN_MON());
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
* SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
* System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
*
* The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime
* in AArch64 except for the XN bits, but we set and unset them at the
* same time, so there's no difference in practice.
* The PL1&0 translation regime in AArch32 behaves like the
* EL1&0 regime in AArch64 except for the XN bits, but we set
* and unset them at the same time, so there's no difference in
* practice.
*/
return 1U;
}
}
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* Function for enabling the MMU in PL1 or PL2, assuming that the page tables
* have already been created.
******************************************************************************/
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
......@@ -119,8 +146,6 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
uint64_t mair, ttbr0;
uint32_t ttbcr;
assert(IS_IN_SECURE());
/* Set attributes in the right indices of the MAIR */
mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
......@@ -129,18 +154,32 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
ATTR_NON_CACHEABLE_INDEX);
/*
* Configure the control register for stage 1 of the PL1&0 translation
* regime.
* Configure the control register for stage 1 of the PL1&0 or EL2
* translation regimes.
*/
/* Use the Long-descriptor translation table format. */
ttbcr = TTBCR_EAE_BIT;
if (xlat_regime == EL1_EL0_REGIME) {
assert(IS_IN_SVC() || IS_IN_MON());
/*
* Disable translation table walk for addresses that are translated
* using TTBR1. Therefore, only TTBR0 is used.
* Disable translation table walk for addresses that are
* translated using TTBR1. Therefore, only TTBR0 is used.
*/
ttbcr |= TTBCR_EPD1_BIT;
} else {
assert(xlat_regime == EL2_REGIME);
assert(IS_IN_HYP());
/*
 * Set the HTCR-specific fields: the RES1 bits, plus inner and
 * outer write-back write-allocate, inner-shareable attributes
 * for the HTTBR page-table walks.
 */
ttbcr |= HTCR_RES1 |
HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
HTCR_RGN0_INNER_WBA;
}
/*
* Limit the input address ranges and memory region sizes translated
......
......@@ -9,6 +9,7 @@
#include <xlat_tables_v2.h>
.global enable_mmu_direct_el1
.global enable_mmu_direct_el2
.global enable_mmu_direct_el3
/* Macros to read and write to system register for a given EL. */
......@@ -20,6 +21,19 @@
mrs \gp_reg, \reg_name\()_el\()\el
.endm
.macro tlbi_invalidate_all el
.if \el == 1
TLB_INVALIDATE(vmalle1)
.elseif \el == 2
TLB_INVALIDATE(alle2)
.elseif \el == 3
TLB_INVALIDATE(alle3)
.else
.error "EL must be 1, 2 or 3"
.endif
.endm
/* void enable_mmu_direct_el<x>(unsigned int flags) */
.macro define_mmu_enable_func el
func enable_mmu_direct_\()el\el
#if ENABLE_ASSERTIONS
......@@ -27,17 +41,8 @@
tst x1, #SCTLR_M_BIT
ASM_ASSERT(eq)
#endif
/* Invalidate TLB entries */
.if \el == 1
TLB_INVALIDATE(vmalle1)
.else
.if \el == 3
TLB_INVALIDATE(alle3)
.else
.error "EL must be 1 or 3"
.endif
.endif
/* Invalidate all TLB entries */
tlbi_invalidate_all \el
mov x7, x0
ldr x0, =mmu_cfg_params
......@@ -86,4 +91,5 @@
* enable_mmu_direct_el3
*/
define_mmu_enable_func 1
define_mmu_enable_func 2
define_mmu_enable_func 3
......@@ -105,6 +105,9 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
if (ctx->xlat_regime == EL1_EL0_REGIME) {
assert(xlat_arch_current_el() >= 1U);
return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
} else if (ctx->xlat_regime == EL2_REGIME) {
assert(xlat_arch_current_el() >= 2U);
return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
} else {
assert(ctx->xlat_regime == EL3_REGIME);
assert(xlat_arch_current_el() >= 3U);
......@@ -118,6 +121,8 @@ bool is_dcache_enabled(void)
if (el == 1U) {
return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
} else if (el == 2U) {
return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
} else {
return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
}
......@@ -128,7 +133,8 @@ uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
if (xlat_regime == EL1_EL0_REGIME) {
return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
} else {
assert(xlat_regime == EL3_REGIME);
assert((xlat_regime == EL2_REGIME) ||
(xlat_regime == EL3_REGIME));
return UPPER_ATTRS(XN);
}
}
......@@ -151,6 +157,9 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
if (xlat_regime == EL1_EL0_REGIME) {
assert(xlat_arch_current_el() >= 1U);
tlbivaae1is(TLBI_ADDR(va));
} else if (xlat_regime == EL2_REGIME) {
assert(xlat_arch_current_el() >= 2U);
tlbivae2is(TLBI_ADDR(va));
} else {
assert(xlat_regime == EL3_REGIME);
assert(xlat_arch_current_el() >= 3U);
......@@ -245,6 +254,8 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
* that are translated using TTBR1_EL1.
*/
tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
} else if (xlat_regime == EL2_REGIME) {
tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
} else {
assert(xlat_regime == EL3_REGIME);
tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
......
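As a worked example of the new EL2 branch above: for a 1 TB (40-bit) physical address space, tcr_ps_bits evaluates to TCR_PS_BITS_1TB (0x2), and the field is written to PS at TCR_EL2[18:16] rather than IPS at TCR_EL1[34:32]. A hedged sketch (the helper and its argument are hypothetical):

        static uint64_t example_tcr_el2(uint64_t tcr_base)
        {
                /* tcr_base carries the TxSZ/SH/RGN bits computed earlier. */
                return tcr_base | TCR_EL2_RES1 |
                       (TCR_PS_BITS_1TB << TCR_EL2_PS_SHIFT);
        }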
......@@ -82,6 +82,8 @@ void init_xlat_tables(void)
if (current_el == 1U) {
tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
} else if (current_el == 2U) {
tf_xlat_ctx.xlat_regime = EL2_REGIME;
} else {
assert(current_el == 3U);
tf_xlat_ctx.xlat_regime = EL3_REGIME;
......@@ -119,12 +121,32 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
#ifdef AARCH32
#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
{
enable_mmu_svc_mon(flags);
}
void enable_mmu_direct(unsigned int flags)
{
enable_mmu_direct_svc_mon(flags);
}
#endif
void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct(flags);
enable_mmu_direct_svc_mon(flags);
}
void enable_mmu_hyp(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_hyp(flags);
}
#else
......@@ -137,6 +159,14 @@ void enable_mmu_el1(unsigned int flags)
enable_mmu_direct_el1(flags);
}
void enable_mmu_el2(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_el2(flags);
}
void enable_mmu_el3(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
......
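A hedged usage sketch of the new AArch32 hyp entry point, for a hypothetical image entered in hyp mode (HYP_IMAGE_BASE/HYP_IMAGE_SIZE and the hook name are placeholders, not part of this patch):

        #include <xlat_tables_v2.h>

        void plat_hyp_arch_setup(void)
        {
                mmap_add_region(HYP_IMAGE_BASE, HYP_IMAGE_BASE, HYP_IMAGE_SIZE,
                                MT_MEMORY | MT_RW | MT_NS);
                init_xlat_tables();     /* picks EL2_REGIME when run in hyp mode */
                enable_mmu_hyp(0);
        }

The AArch64 side is symmetric: an image running at EL2 calls enable_mmu_el2(), and both paths feed the shared mmu_cfg_params array consumed by the enable_mmu_direct_* routines.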
......@@ -142,7 +142,8 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
}
} else {
assert(ctx->xlat_regime == EL3_REGIME);
assert((ctx->xlat_regime == EL2_REGIME) ||
(ctx->xlat_regime == EL3_REGIME));
desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
}
......@@ -1016,6 +1017,7 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
assert(ctx != NULL);
assert(!ctx->initialized);
assert((ctx->xlat_regime == EL3_REGIME) ||
(ctx->xlat_regime == EL2_REGIME) ||
(ctx->xlat_regime == EL1_EL0_REGIME));
assert(!is_mmu_enabled_ctx(ctx));
......
......@@ -60,8 +60,8 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
tf_printf("DEV");
}
if (xlat_regime == EL3_REGIME) {
/* For EL3 only check the AP[2] and XN bits. */
if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
/* For EL3 and EL2 only check the AP[2] and XN bits. */
tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
tf_printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
} else {
......@@ -200,6 +200,8 @@ void xlat_tables_print(xlat_ctx_t *ctx)
if (ctx->xlat_regime == EL1_EL0_REGIME) {
xlat_regime_str = "1&0";
} else if (ctx->xlat_regime == EL2_REGIME) {
xlat_regime_str = "2";
} else {
assert(ctx->xlat_regime == EL3_REGIME);
xlat_regime_str = "3";
......@@ -329,6 +331,7 @@ static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
assert(ctx != NULL);
assert(ctx->initialized);
assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
(ctx->xlat_regime == EL2_REGIME) ||
(ctx->xlat_regime == EL3_REGIME));
virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
......
......@@ -122,7 +122,7 @@ void arm_bl1_plat_arch_setup(void)
arm_setup_page_tables(bl_regions, plat_arm_get_mmap());
#ifdef AARCH32
enable_mmu_secure(0);
enable_mmu_svc_mon(0);
#else
enable_mmu_el3(0);
#endif /* AARCH32 */
......
......@@ -82,7 +82,7 @@ void arm_bl2_el3_plat_arch_setup(void)
arm_setup_page_tables(bl_regions, plat_arm_get_mmap());
#ifdef AARCH32
enable_mmu_secure(0);
enable_mmu_svc_mon(0);
#else
enable_mmu_el3(0);
#endif
......
......@@ -252,7 +252,7 @@ void arm_bl2_plat_arch_setup(void)
arm_setup_page_tables(bl_regions, plat_arm_get_mmap());
#ifdef AARCH32
enable_mmu_secure(0);
enable_mmu_svc_mon(0);
#else
enable_mmu_el1(0);
#endif
......
......@@ -79,7 +79,7 @@ void arm_bl2u_plat_arch_setup(void)
arm_setup_page_tables(bl_regions, plat_arm_get_mmap());
#ifdef AARCH32
enable_mmu_secure(0);
enable_mmu_svc_mon(0);
#else
enable_mmu_el1(0);
#endif
......
......@@ -212,5 +212,5 @@ void sp_min_plat_arch_setup(void)
arm_setup_page_tables(bl_regions, plat_arm_get_mmap());
enable_mmu_secure(0);
enable_mmu_svc_mon(0);
}
......@@ -17,5 +17,5 @@
void bl32_plat_enable_mmu(uint32_t flags)
{
enable_mmu_secure(flags);
enable_mmu_svc_mon(flags);
}