diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 0d1077cbd608a39ee7b9b23f8f70b2fda237d841..58e8afbd2fc07264baa81ebbc1e851247dc923ac 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -170,15 +170,12 @@ func bl31_warm_entrypoint
 	 * enter coherency (as CPUs already are); and there's no reason to have
 	 * caches disabled either.
 	 */
-	mov	x0, #DISABLE_DCACHE
-	bl	bl31_plat_enable_mmu
-
 #if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-	mrs	x0, sctlr_el3
-	orr	x0, x0, #SCTLR_C_BIT
-	msr	sctlr_el3, x0
-	isb
+	mov	x0, xzr
+#else
+	mov	x0, #DISABLE_DCACHE
 #endif
+	bl	bl31_plat_enable_mmu
 
 	bl	psci_warmboot_entrypoint
 
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index 87ef3f36e534ef00872c1956a27564296782b746..d6853cc406886036a16ec24e0c5340f405cc0550 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -298,20 +298,17 @@ func sp_min_warm_entrypoint
 	 * enter coherency (as CPUs already are); and there's no reason to have
 	 * caches disabled either.
 	 */
+#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
+	mov	r0, #0
+#else
 	mov	r0, #DISABLE_DCACHE
+#endif
 	bl	bl32_plat_enable_mmu
 
 #if SP_MIN_WITH_SECURE_FIQ
 	route_fiq_to_sp_min r0
 #endif
 
-#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
-	ldcopr	r0, SCTLR
-	orr	r0, r0, #SCTLR_C_BIT
-	stcopr	r0, SCTLR
-	isb
-#endif
-
 	bl	sp_min_warm_boot
 	bl	smc_get_next_ctx
 	/* r0 points to `smc_ctx_t` */
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 489183c52a4ea2927102108b148c2be6ebf0b3dd..5d9da857882ac527d19faa9387f1c13fcff61403 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -247,40 +247,12 @@ func tsp_cpu_on_entry
 	bl	plat_set_my_stack
 
 	/* --------------------------------------------
-	 * Enable the MMU with the DCache disabled. It
-	 * is safe to use stacks allocated in normal
-	 * memory as a result. All memory accesses are
-	 * marked nGnRnE when the MMU is disabled. So
-	 * all the stack writes will make it to memory.
-	 * All memory accesses are marked Non-cacheable
-	 * when the MMU is enabled but D$ is disabled.
-	 * So used stack memory is guaranteed to be
-	 * visible immediately after the MMU is enabled
-	 * Enabling the DCache at the same time as the
-	 * MMU can lead to speculatively fetched and
-	 * possibly stale stack memory being read from
-	 * other caches. This can lead to coherency
-	 * issues.
+	 * Enable MMU and D-caches together.
 	 * --------------------------------------------
 	 */
-	mov	x0, #DISABLE_DCACHE
+	mov	x0, #0
 	bl	bl32_plat_enable_mmu
 
-	/* ---------------------------------------------
-	 * Enable the Data cache now that the MMU has
-	 * been enabled. The stack has been unwound. It
-	 * will be written first before being read. This
-	 * will invalidate any stale cache lines resi-
-	 * -dent in other caches. We assume that
-	 * interconnect coherency has been enabled for
-	 * this cluster by EL3 firmware.
-	 * ---------------------------------------------
-	 */
-	mrs	x0, sctlr_el1
-	orr	x0, x0, #SCTLR_C_BIT
-	msr	sctlr_el1, x0
-	isb
-
 	/* ---------------------------------------------
 	 * Enter C runtime to perform any remaining
 	 * book keeping
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index a737cf4e62630735426ef22541d6529ca47e8f67..5462cc1ec21bae7e6c19c0fe41399f173f64041a 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -1997,6 +1997,39 @@ state. This function must return a pointer to the ``entry_point_info`` structure
 (that was copied during ``bl31_early_platform_setup()``) if the image exists. It
 should return NULL otherwise.
 
+Function : bl31_plat_enable_mmu [optional]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    Argument : uint32_t
+    Return   : void
+
+This function enables the MMU. The boot code calls this function with the MMU
+and caches disabled. It should program the registers necessary to enable
+address translation; upon return, the MMU on the calling PE must be enabled.
+
+The function must honor the flags passed in the first argument. These flags
+are defined by the translation library and can be found in
+``include/lib/xlat_tables/xlat_mmu_helpers.h``.
+
+On DynamIQ systems, this function must not use the stack while enabling the
+MMU; version 2 of the translation table library implements it this way.
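+
+A minimal override (a sketch, for illustration only; suitable where the stack
+constraint above does not apply) might simply forward the flags to the
+translation library's ``enable_mmu_el3()``:
+
+::
+
+    void bl31_plat_enable_mmu(uint32_t flags)
+    {
+        enable_mmu_el3(flags);
+    }
+
+The default implementation in ``plat/common/aarch64/platform_helpers.S``
+instead tail-calls ``enable_mmu_direct_el3``, which does not use the stack.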
+
 Function : plat\_get\_syscnt\_freq2() [mandatory]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index a40615dbfbfd85fc5034fafab9742bbfb08c8569..68a74edd5fd22a8bae0cdf85655459f25a703eb4 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -454,6 +454,10 @@ Common build options
    management operations. This option defaults to 0 and if it is enabled,
    then it implies ``WARMBOOT_ENABLE_DCACHE_EARLY`` is also enabled.
 
+   Note that, when ``HW_ASSISTED_COHERENCY`` is enabled, version 2 of the
+   translation table library (xlat tables v2) must be used; version 1 is not
+   supported.
+
 -  ``JUNO_AARCH32_EL3_RUNTIME``: This build flag enables you to execute EL3
    runtime software in AArch32 mode, which is required to run AArch32 on Juno.
    By default this flag is set to '0'. Enabling this flag builds BL1 and BL2 in
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
index 74322228e72ecfb4b4923e5a74e611977942fa8e..f7d0595e155b1cec4b8357080c7b962a1de57438 100644
--- a/include/common/aarch32/asm_macros.S
+++ b/include/common/aarch32/asm_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +10,20 @@
 #include <asm_macros_common.S>
 #include <spinlock.h>
 
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * Cortex-A57 erratum 813419.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_reg, _coproc) \
+	stcopr	_reg, _coproc; \
+	dsb	ish; \
+	stcopr	_reg, _coproc
+#else
+#define TLB_INVALIDATE(_reg, _coproc) \
+	stcopr	_reg, _coproc
+#endif
+
 #define WORD_SIZE	4
 
 	/*
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
index 7c8e643d1140c3bcd4634f1afac64f0b867ff673..5b050455ceb993d555b30980655c4be58325b161 100644
--- a/include/common/aarch64/asm_macros.S
+++ b/include/common/aarch64/asm_macros.S
@@ -10,6 +10,20 @@
 #include <asm_macros_common.S>
 #include <spinlock.h>
 
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * Cortex-A57 erratum 813419.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_type) \
+	tlbi	_type; \
+	dsb	ish; \
+	tlbi	_type
+#else
+#define TLB_INVALIDATE(_type) \
+	tlbi	_type
+#endif
+
 
 	.macro	func_prologue
 	stp	x29, x30, [sp, #-0x10]!
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 910341a72e9c097bedfcfce94e736bc338cd5bb5..a940b63b59c5ea324e5626cb66d409fc712457f0 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -340,7 +340,7 @@
 /*
  * TTBR definitions
  */
-#define TTBR_CNP_BIT		0x1
+#define TTBR_CNP_BIT		U(0x1)
 
 /*
  * CTR definitions
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 7335103b1b1521931535a4d27e6e4ade7cf02997..1bdf3c4b0520542d7aff607917343573266c0c0f 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -127,8 +127,8 @@
  * expected.
  */
 #define ARM_ARCH_AT_LEAST(_maj, _min) \
-	((ARM_ARCH_MAJOR > _maj) || \
-	 ((ARM_ARCH_MAJOR == _maj) && (ARM_ARCH_MINOR >= _min)))
+	((ARM_ARCH_MAJOR > (_maj)) || \
+	 ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
 
 /*
  * Import an assembly or linker symbol as a C expression with the specified
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
index 77953177003ab5a4a61aa29d6ce1014ed2832b99..b6c53e267d7d593e64f19702617f357ce9c03d0f 100644
--- a/include/lib/xlat_tables/xlat_mmu_helpers.h
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -48,10 +48,15 @@
 #ifdef AARCH32
 /* AArch32 specific translation table API */
 void enable_mmu_secure(unsigned int flags);
+
+void enable_mmu_direct(unsigned int flags);
 #else
 /* AArch64 specific translation table APIs */
 void enable_mmu_el1(unsigned int flags);
 void enable_mmu_el3(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
 #endif /* AARCH32 */
 
 int xlat_arch_is_granule_size_supported(size_t size);
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 98f00d7156735f04d707ccd903eceaf085f0f6b4..4dc2c5ec7aeaf11fe2fda1764ed195e53f77b1e1 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,12 +8,12 @@
 #define __XLAT_TABLES_V2_H__
 
 #include <xlat_tables_defs.h>
+#include <xlat_tables_v2_helpers.h>
 
 #ifndef __ASSEMBLY__
 #include <stddef.h>
 #include <stdint.h>
 #include <xlat_mmu_helpers.h>
-#include <xlat_tables_v2_helpers.h>
 
 /*
  * Default granularity size for an mmap_region_t.
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
index de1c2d4bfd3f67e512286233d6c91096d6fba14e..e1ea2b64f2f66ab038cb4f266f329e68d6d03862 100644
--- a/include/lib/xlat_tables/xlat_tables_v2_helpers.h
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -16,6 +16,13 @@
 #error "Do not include this header file directly. Include xlat_tables_v2.h instead."
 #endif
 
+/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
+#define MMU_CFG_MAIR0		0
+#define MMU_CFG_TCR		1
+#define MMU_CFG_TTBR0_LO	2
+#define MMU_CFG_TTBR0_HI	3
+#define MMU_CFG_PARAM_MAX	4
+
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
@@ -24,6 +31,9 @@
 #include <xlat_tables_arch.h>
 #include <xlat_tables_defs.h>
 
+/* Register values to be programmed when enabling the MMU */
+extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /* Forward declaration */
 struct mmap_region;
 
@@ -162,6 +172,8 @@ struct xlat_ctx {
 		.initialized = 0,						\
 	}
 
+#endif /*__ASSEMBLY__*/
+
 #if AARCH64
 
 /*
@@ -187,6 +199,4 @@ struct xlat_ctx {
 
 #endif /* AARCH64 */
 
-#endif /*__ASSEMBLY__*/
-
 #endif /* __XLAT_TABLES_V2_HELPERS_H__ */
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index 720d4461d56c749aa1cb664cb06a5a8836d1d5a8..dd639397a669cc5c8e3b835ca2ba85b4cd3096b8 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -130,3 +130,8 @@ void enable_mmu_secure(unsigned int flags)
 	/* Ensure the MMU enable takes effect immediately */
 	isb();
 }
+
+void enable_mmu_direct(unsigned int flags)
+{
+	enable_mmu_secure(flags);
+}
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index a72c6454c6ac5731b3324f92b6afeb42a453d89c..5717516a4c0dbcc7e77dde8c617a4f9c75c8c5da 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -181,6 +181,11 @@ void init_xlat_tables(void)
 									\
 		/* Ensure the MMU enable takes effect immediately */	\
 		isb();							\
+	}								\
+									\
+	void enable_mmu_direct_el##_el(unsigned int flags)		\
+	{								\
+		enable_mmu_el##_el(flags);				\
 	}
 
 /* Define EL1 and EL3 variants of the function enabling the MMU */
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index 50d6bd59ca3f57566643d6925c7560e8727e690e..810c48e1a05fb28eb1d3c6b1f414b637b1a02734 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -11,6 +11,10 @@
 #include <platform_def.h>
 #include <xlat_tables_arch.h>
 
+#if HW_ASSISTED_COHERENCY
+#error xlat tables v2 must be used with HW_ASSISTED_COHERENCY
+#endif
+
 /*
  * If the platform hasn't defined a physical and a virtual address space size
  * default to ADDR_SPACE_SIZE.
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S
new file mode 100644
index 0000000000000000000000000000000000000000..97cdde7516b0effd0a1f968b5684a3e73d2ad0ba
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+	.global	enable_mmu_direct
+
+func enable_mmu_direct
+	/* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+	ldcopr  r1, SCTLR
+	tst	r1, #SCTLR_M_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* Invalidate TLB entries */
+	TLB_INVALIDATE(r0, TLBIALL)
+
+	mov	r3, r0
+	ldr	r0, =mmu_cfg_params
+
+	/* MAIR0 */
+	ldr	r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+	stcopr	r1, MAIR0
+
+	/* TTBCR */
+	ldr	r2, [r0, #(MMU_CFG_TCR << 2)]
+	stcopr	r2, TTBCR
+
+	/* TTBR0 */
+	ldr	r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
+	ldr	r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+	stcopr16	r1, r2, TTBR0_64
+
+	/* TTBR1 is unused right now; set it to 0. */
+	mov	r1, #0
+	mov	r2, #0
+	stcopr16	r1, r2, TTBR1_64
+
+	/*
+	 * Ensure all translation table writes have drained into memory, the TLB
+	 * invalidation is complete, and translation register writes are
+	 * committed before enabling the MMU
+	 */
+	dsb	ish
+	isb
+
+	/* Enable the MMU, honoring the requested flags */
+	ldcopr  r1, SCTLR
+	ldr	r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
+	orr	r1, r1, r2
+
+	/* Clear C bit if requested */
+	tst	r3, #DISABLE_DCACHE
+	bicne	r1, r1, #SCTLR_C_BIT
+
+	stcopr	r1, SCTLR
+	isb
+
+	bx	lr
+endfunc enable_mmu_direct
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index f66f802f3da3425f29c4095c578a7743d63f1e8a..94dcf578a00b79005ef21717a07279edb12dacb7 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -18,6 +18,8 @@
 #error ARMv7 target does not support LPAE MMU descriptors
 #endif
 
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
@@ -109,22 +111,16 @@ int xlat_arch_current_el(void)
  * Function for enabling the MMU in Secure PL1, assuming that the page tables
  * have already been created.
  ******************************************************************************/
-void enable_mmu_arch(unsigned int flags,
-		uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+		const uint64_t *base_table,
 		unsigned long long max_pa,
 		uintptr_t max_va)
 {
-	u_register_t mair0, ttbcr, sctlr;
+	u_register_t mair0, ttbcr;
 	uint64_t ttbr0;
 
 	assert(IS_IN_SECURE());
 
-	sctlr = read_sctlr();
-	assert((sctlr & SCTLR_M_BIT) == 0);
-
-	/* Invalidate TLBs at the current exception level */
-	tlbiall();
-
 	/* Set attributes in the right indices of the MAIR */
 	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
 	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
@@ -185,30 +181,9 @@ void enable_mmu_arch(unsigned int flags,
 	ttbr0 |= TTBR_CNP_BIT;
 #endif
 
-	/* Now program the relevant system registers */
-	write_mair0(mair0);
-	write_ttbcr(ttbcr);
-	write64_ttbr0(ttbr0);
-	write64_ttbr1(0);
-
-	/*
-	 * Ensure all translation table writes have drained
-	 * into memory, the TLB invalidation is complete,
-	 * and translation register writes are committed
-	 * before enabling the MMU
-	 */
-	dsbish();
-	isb();
-
-	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
-
-	if (flags & DISABLE_DCACHE)
-		sctlr &= ~SCTLR_C_BIT;
-	else
-		sctlr |= SCTLR_C_BIT;
-
-	write_sctlr(sctlr);
-
-	/* Ensure the MMU enable takes effect immediately */
-	isb();
+	/* Now populate MMU configuration */
+	mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
+	mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
+	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
+	mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
 }
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
new file mode 100644
index 0000000000000000000000000000000000000000..a72c7fae51c1beeea5b1eb2edf4e80afbaa32779
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+	.global	enable_mmu_direct_el1
+	.global	enable_mmu_direct_el3
+
+	/* Macros to read and write to system register for a given EL. */
+	.macro _msr reg_name, el, gp_reg
+	msr	\reg_name\()_el\()\el, \gp_reg
+	.endm
+
+	.macro _mrs gp_reg, reg_name, el
+	mrs	\gp_reg, \reg_name\()_el\()\el
+	.endm
+
+	.macro define_mmu_enable_func el
+	func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+		_mrs	x1, sctlr, \el
+		tst	x1, #SCTLR_M_BIT
+		ASM_ASSERT(eq)
+#endif
+
+		/* Invalidate TLB entries */
+		.if \el == 1
+		TLB_INVALIDATE(vmalle1)
+		.else
+		.if \el == 3
+		TLB_INVALIDATE(alle3)
+		.else
+		.error "EL must be 1 or 3"
+		.endif
+		.endif
+
+		mov	x7, x0
+		ldr	x0, =mmu_cfg_params
+
+		/* MAIR */
+		ldr	w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+		_msr	mair, \el, x1
+
+		/* TCR */
+		ldr	w2, [x0, #(MMU_CFG_TCR << 2)]
+		_msr	tcr, \el, x2
+
+		/* TTBR */
+		ldr	w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
+		ldr	w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
+		orr	x3, x3, x4, lsl #32
+		_msr	ttbr0, \el, x3
+
+		/*
+		 * Ensure all translation table writes have drained into memory, the TLB
+		 * invalidation is complete, and translation register writes are
+		 * committed before enabling the MMU
+		 */
+		dsb	ish
+		isb
+
+		/* Set and clear required fields of SCTLR */
+		_mrs	x4, sctlr, \el
+		mov_imm	x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+		orr	x4, x4, x5
+
+		/* Additionally, amend SCTLR fields based on flags */
+		bic	x5, x4, #SCTLR_C_BIT
+		tst	x7, #DISABLE_DCACHE
+		csel	x4, x5, x4, ne
+
+		_msr	sctlr, \el, x4
+		isb
+
+		ret
+	endfunc enable_mmu_direct_\()el\el
+	.endm
+
+	/*
+	 * Define MMU-enabling functions for EL1 and EL3:
+	 *
+	 *  enable_mmu_direct_el1
+	 *  enable_mmu_direct_el3
+	 */
+	define_mmu_enable_func 1
+	define_mmu_enable_func 3
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index c501e7074968b4cad3309e39ad9401cd4d26c79a..71b9c8fae3bd55de6a7a47b72505d2b4c4dae665 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -16,6 +16,8 @@
 #include <xlat_tables_v2.h>
 #include "../xlat_tables_private.h"
 
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
@@ -183,70 +185,13 @@ int xlat_arch_current_el(void)
 	return el;
 }
 
-/*******************************************************************************
- * Macro generating the code for the function enabling the MMU in the given
- * exception level, assuming that the pagetables have already been created.
- *
- *   _el:		Exception level at which the function will run
- *   _tlbi_fct:		Function to invalidate the TLBs at the current
- *			exception level
- ******************************************************************************/
-#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct)				\
-	static void enable_mmu_internal_el##_el(int flags,		\
-						uint64_t mair,		\
-						uint64_t tcr,		\
-						uint64_t ttbr)		\
-	{								\
-		uint32_t sctlr = read_sctlr_el##_el();			\
-		assert((sctlr & SCTLR_M_BIT) == 0);			\
-									\
-		/* Invalidate TLBs at the current exception level */	\
-		_tlbi_fct();						\
-									\
-		write_mair_el##_el(mair);				\
-		write_tcr_el##_el(tcr);					\
-									\
-		/* Set TTBR bits as well */				\
-		if (ARM_ARCH_AT_LEAST(8, 2)) {				\
-			/* Enable CnP bit so as to share page tables */	\
-			/* with all PEs. This is mandatory for */	\
-			/* ARMv8.2 implementations. */			\
-			ttbr |= TTBR_CNP_BIT;				\
-		}							\
-		write_ttbr0_el##_el(ttbr);				\
-									\
-		/* Ensure all translation table writes have drained */	\
-		/* into memory, the TLB invalidation is complete, */	\
-		/* and translation register writes are committed */	\
-		/* before enabling the MMU */				\
-		dsbish();						\
-		isb();							\
-									\
-		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
-		if (flags & DISABLE_DCACHE)				\
-			sctlr &= ~SCTLR_C_BIT;				\
-		else							\
-			sctlr |= SCTLR_C_BIT;				\
-									\
-		write_sctlr_el##_el(sctlr);				\
-									\
-		/* Ensure the MMU enable takes effect immediately */	\
-		isb();							\
-	}
-
-/* Define EL1 and EL3 variants of the function enabling the MMU */
-#if IMAGE_EL == 1
-DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
-#elif IMAGE_EL == 3
-DEFINE_ENABLE_MMU_EL(3, tlbialle3)
-#endif
-
-void enable_mmu_arch(unsigned int flags,
-		uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+		const uint64_t *base_table,
 		unsigned long long max_pa,
 		uintptr_t max_va)
 {
 	uint64_t mair, ttbr, tcr;
+	uintptr_t virtual_addr_space_size;
 
 	/* Set attributes in the right indices of the MAIR. */
 	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -255,28 +200,26 @@ void enable_mmu_arch(unsigned int flags,
 
 	ttbr = (uint64_t) base_table;
 
-	/*
-	 * Set TCR bits as well.
-	 */
-
 	/*
 	 * Limit the input address ranges and memory region sizes translated
 	 * using TTBR0 to the given virtual address space size.
 	 */
-	assert(max_va < UINTPTR_MAX);
-	uintptr_t virtual_addr_space_size = max_va + 1;
+	assert(max_va < ((uint64_t) UINTPTR_MAX));
+
+	virtual_addr_space_size = max_va + 1;
 	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
 	/*
 	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
 	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
 	 */
-	tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
+	tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
 
 	/*
 	 * Set the cacheability and shareability attributes for memory
 	 * associated with translation table walks.
 	 */
-	if (flags & XLAT_TABLE_NC) {
+	if ((flags & XLAT_TABLE_NC) != 0) {
 		/* Inner & outer non-cacheable non-shareable. */
 		tcr |= TCR_SH_NON_SHAREABLE |
 			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
@@ -299,10 +242,23 @@ void enable_mmu_arch(unsigned int flags,
 	 * translated using TTBR1_EL1.
 	 */
 	tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-	enable_mmu_internal_el1(flags, mair, tcr, ttbr);
 #elif IMAGE_EL == 3
 	assert(IS_IN_EL(3));
 	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-	enable_mmu_internal_el3(flags, mair, tcr, ttbr);
 #endif
+
+	mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
+	mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
+
+	/* Set TTBR bits as well */
+	if (ARM_ARCH_AT_LEAST(8, 2)) {
+		/*
+		 * Enable CnP bit so as to share page tables with all PEs. This
+		 * is mandatory for ARMv8.2 implementations.
+		 */
+		ttbr |= TTBR_CNP_BIT;
+	}
+
+	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
+	mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
 }
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index 06dd844a21dbf8e38702e69a50aacbecfbb46580..1e70f37f7c177e898b6f06ab0bc5779e26a9103e 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -1,10 +1,11 @@
 #
-# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
 XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
+				${ARCH}/enable_mmu.S			\
 				${ARCH}/xlat_tables_arch.c		\
 				xlat_tables_internal.c)
 
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 5beb51e90a2eb3896ba4b976b937d910c1e83ec4..7f1d3958a2cfa2266adf48f256c8088282543ff2 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -802,7 +802,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 	 * that there is free space.
 	 */
 	assert(mm_last->size == 0U);
-	
+
 	/* Make room for new region by moving other regions up by one place */
 	mm_destination = mm_cursor + 1;
 	memmove(mm_destination, mm_cursor,
@@ -1313,22 +1313,25 @@ void init_xlat_tables(void)
 
 void enable_mmu_secure(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
 			tf_xlat_ctx.va_max_address);
+	enable_mmu_direct(flags);
 }
 
 #else
 
 void enable_mmu_el1(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
 			tf_xlat_ctx.va_max_address);
+	enable_mmu_direct_el1(flags);
 }
 
 void enable_mmu_el3(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
 			tf_xlat_ctx.va_max_address);
+	enable_mmu_direct_el3(flags);
 }
 
 #endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 157dd039622a0960a0bbef2cc2e42488d4fbf394..777189fb01eb8310ea7092b83ae7282f71877708 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -81,7 +81,7 @@ int xlat_arch_current_el(void);
 unsigned long long xlat_arch_get_max_supported_pa(void);
 
 /* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
 		unsigned long long max_pa, uintptr_t max_va);
 
 /*
diff --git a/plat/common/aarch64/plat_common.c b/plat/common/aarch64/plat_common.c
index 409ae55a2ff8bab66a4932b4f6613bfef4400236..5f2972cc49e6e6d3aa5792649dcb301a0158ae65 100644
--- a/plat/common/aarch64/plat_common.c
+++ b/plat/common/aarch64/plat_common.c
@@ -18,8 +18,6 @@
  * provide typical implementations that may be re-used by multiple
  * platforms but may also be overridden by a platform if required.
  */
-#pragma weak bl31_plat_enable_mmu
-#pragma weak bl32_plat_enable_mmu
 #pragma weak bl31_plat_runtime_setup
 #if !ERROR_DEPRECATED
 #pragma weak plat_get_syscnt_freq2
@@ -33,16 +31,6 @@
 
 #pragma weak plat_ea_handler
 
-void bl31_plat_enable_mmu(uint32_t flags)
-{
-	enable_mmu_el3(flags);
-}
-
-void bl32_plat_enable_mmu(uint32_t flags)
-{
-	enable_mmu_el1(flags);
-}
-
 void bl31_plat_runtime_setup(void)
 {
 #if MULTI_CONSOLE_API
diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S
index 033a12f8e94dccb923cac22191cf178ee07b69e9..a413f5fd1f000479ebe7908bde5c6e3f1be5ad13 100644
--- a/plat/common/aarch64/platform_helpers.S
+++ b/plat/common/aarch64/platform_helpers.S
@@ -17,6 +17,8 @@
 	.weak	plat_disable_acp
 	.weak	bl1_plat_prepare_exit
 	.weak	plat_panic_handler
+	.weak	bl31_plat_enable_mmu
+	.weak	bl32_plat_enable_mmu
 
 #if !ENABLE_PLAT_COMPAT
 	.globl	platform_get_core_pos
@@ -164,3 +166,23 @@ func plat_panic_handler
 	wfi
 	b	plat_panic_handler
 endfunc plat_panic_handler
+
+	/* -----------------------------------------------------
+	 * void bl31_plat_enable_mmu(uint32_t flags);
+	 *
+	 * Enable MMU in BL31.
+	 * -----------------------------------------------------
+	 */
+func bl31_plat_enable_mmu
+	b	enable_mmu_direct_el3
+endfunc bl31_plat_enable_mmu
+
+	/* -----------------------------------------------------
+	 * void bl32_plat_enable_mmu(uint32_t flags);
+	 *
+	 * Enable MMU in BL32.
+	 * -----------------------------------------------------
+	 */
+func bl32_plat_enable_mmu
+	b	enable_mmu_direct_el1
+endfunc bl32_plat_enable_mmu