Commit 9f3ee61c authored by Soby Mathew
Browse files

AArch32: Fix the stack alignment issue



The AArch32 Procedure Call Standard (AAPCS) mandates that the stack must be
aligned to an 8-byte boundary at external interfaces. This patch makes the
required changes.

This problem was detected when a crash was encountered in
`psci_print_power_domain_map()` while printing 64 bit values. Aligning
the stack to 8 byte boundary resolved the problem.

Fixes ARM-Software/tf-issues#437

Change-Id: I517bd8203601bb88e9311bd36d477fb7b3efb292
Signed-off-by: Soby Mathew <soby.mathew@arm.com>
parent 919ad05e
...@@ -168,8 +168,11 @@ func handle_smc ...@@ -168,8 +168,11 @@ func handle_smc
mov r2, r0 /* handle */ mov r2, r0 /* handle */
ldcopr r0, SCR ldcopr r0, SCR
/* Save SCR in stack */ /*
push {r0} * Save SCR in stack. r1 is pushed to meet the 8 byte
* stack alignment requirement.
*/
push {r0, r1}
and r3, r0, #SCR_NS_BIT /* flags */ and r3, r0, #SCR_NS_BIT /* flags */
/* Switch to Secure Mode*/ /* Switch to Secure Mode*/
...@@ -191,7 +194,7 @@ func handle_smc ...@@ -191,7 +194,7 @@ func handle_smc
/* r0 points to smc context */ /* r0 points to smc context */
/* Restore SCR from stack */ /* Restore SCR from stack */
pop {r1} pop {r1, r2}
stcopr r1, SCR stcopr r1, SCR
isb isb
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#define SMC_CTX_GPREG_R2 0x8 #define SMC_CTX_GPREG_R2 0x8
#define SMC_CTX_GPREG_R3 0xC #define SMC_CTX_GPREG_R3 0xC
#define SMC_CTX_GPREG_R4 0x10 #define SMC_CTX_GPREG_R4 0x10
#define SMC_CTX_GPREG_R5 0x14
#define SMC_CTX_SP_USR 0x34 #define SMC_CTX_SP_USR 0x34
#define SMC_CTX_SPSR_MON 0x78 #define SMC_CTX_SPSR_MON 0x78
#define SMC_CTX_LR_MON 0x7C #define SMC_CTX_LR_MON 0x7C
......
...@@ -38,22 +38,22 @@ ...@@ -38,22 +38,22 @@
* contains the pointer to the `smc_context_t`. * contains the pointer to the `smc_context_t`.
*/ */
.macro smcc_save_gp_mode_regs .macro smcc_save_gp_mode_regs
push {r0-r3, lr} push {r0-r4, lr}
ldcopr r0, SCR ldcopr r0, SCR
and r0, r0, #SCR_NS_BIT and r0, r0, #SCR_NS_BIT
bl smc_get_ctx bl smc_get_ctx
/* Save r4 - r12 in the SMC context */ /* Save r5 - r12 in the SMC context */
add r1, r0, #SMC_CTX_GPREG_R4 add r1, r0, #SMC_CTX_GPREG_R5
stm r1!, {r4-r12} stm r1!, {r5-r12}
/* /*
* Pop r0 - r3, lr to r4 - r7, lr from stack and then save * Pop r0 - r4, lr to r4 - r8, lr from stack and then save
* it to SMC context. * it to SMC context.
*/ */
pop {r4-r7, lr} pop {r4-r8, lr}
stm r0, {r4-r7} stm r0, {r4-r8}
/* Save the banked registers including the current SPSR and LR */ /* Save the banked registers including the current SPSR and LR */
mrs r4, sp_usr mrs r4, sp_usr
......
...@@ -72,7 +72,8 @@ endfunc cortex_a32_reset_func ...@@ -72,7 +72,8 @@ endfunc cortex_a32_reset_func
* ---------------------------------------------------- * ----------------------------------------------------
*/ */
func cortex_a32_core_pwr_dwn func cortex_a32_core_pwr_dwn
push {lr} /* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
/* Assert if cache is enabled */ /* Assert if cache is enabled */
#if ASM_ASSERTION #if ASM_ASSERTION
...@@ -92,7 +93,7 @@ func cortex_a32_core_pwr_dwn ...@@ -92,7 +93,7 @@ func cortex_a32_core_pwr_dwn
* Come out of intra cluster coherency * Come out of intra cluster coherency
* --------------------------------------------- * ---------------------------------------------
*/ */
pop {lr} pop {r12, lr}
b cortex_a32_disable_smp b cortex_a32_disable_smp
endfunc cortex_a32_core_pwr_dwn endfunc cortex_a32_core_pwr_dwn
...@@ -102,7 +103,8 @@ endfunc cortex_a32_core_pwr_dwn ...@@ -102,7 +103,8 @@ endfunc cortex_a32_core_pwr_dwn
* ------------------------------------------------------- * -------------------------------------------------------
*/ */
func cortex_a32_cluster_pwr_dwn func cortex_a32_cluster_pwr_dwn
push {lr} /* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
/* Assert if cache is enabled */ /* Assert if cache is enabled */
#if ASM_ASSERTION #if ASM_ASSERTION
...@@ -135,7 +137,7 @@ func cortex_a32_cluster_pwr_dwn ...@@ -135,7 +137,7 @@ func cortex_a32_cluster_pwr_dwn
* Come out of intra cluster coherency * Come out of intra cluster coherency
* --------------------------------------------- * ---------------------------------------------
*/ */
pop {lr} pop {r12, lr}
b cortex_a32_disable_smp b cortex_a32_disable_smp
endfunc cortex_a32_cluster_pwr_dwn endfunc cortex_a32_cluster_pwr_dwn
......
...@@ -76,9 +76,10 @@ endfunc reset_handler ...@@ -76,9 +76,10 @@ endfunc reset_handler
*/ */
.globl prepare_core_pwr_dwn .globl prepare_core_pwr_dwn
func prepare_core_pwr_dwn func prepare_core_pwr_dwn
push {lr} /* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
bl _cpu_data bl _cpu_data
pop {lr} pop {r12, lr}
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION #if ASM_ASSERTION
...@@ -98,9 +99,10 @@ endfunc prepare_core_pwr_dwn ...@@ -98,9 +99,10 @@ endfunc prepare_core_pwr_dwn
*/ */
.globl prepare_cluster_pwr_dwn .globl prepare_cluster_pwr_dwn
func prepare_cluster_pwr_dwn func prepare_cluster_pwr_dwn
push {lr} /* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
bl _cpu_data bl _cpu_data
pop {lr} pop {r12, lr}
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION #if ASM_ASSERTION
......
...@@ -41,9 +41,10 @@ ...@@ -41,9 +41,10 @@
* ----------------------------------------------------------------- * -----------------------------------------------------------------
*/ */
func _cpu_data func _cpu_data
push {lr} /* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
bl plat_my_core_pos bl plat_my_core_pos
pop {lr} pop {r12, lr}
b _cpu_data_by_index b _cpu_data_by_index
endfunc _cpu_data endfunc _cpu_data
......
...@@ -93,7 +93,8 @@ endfunc psci_do_pwrdown_cache_maintenance ...@@ -93,7 +93,8 @@ endfunc psci_do_pwrdown_cache_maintenance
* ----------------------------------------------------------------------- * -----------------------------------------------------------------------
*/ */
func psci_do_pwrup_cache_maintenance func psci_do_pwrup_cache_maintenance
push {lr} /* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
/* --------------------------------------------- /* ---------------------------------------------
* Ensure any inflight stack writes have made it * Ensure any inflight stack writes have made it
...@@ -123,7 +124,7 @@ func psci_do_pwrup_cache_maintenance ...@@ -123,7 +124,7 @@ func psci_do_pwrup_cache_maintenance
stcopr r0, SCTLR stcopr r0, SCTLR
isb isb
pop {pc} pop {r12, pc}
endfunc psci_do_pwrup_cache_maintenance endfunc psci_do_pwrup_cache_maintenance
/* --------------------------------------------- /* ---------------------------------------------
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment