Commit b6285d64 authored by Soby Mathew, committed by dp-arm

AArch32: Rework SMC context save and restore mechanism



The current SMC context data structure `smc_ctx_t` and related helpers are
optimized for the case where an SMC call does not result in a world switch.
This was the case for SP_MIN and the BL1 cold boot flow. But the firmware
update use case requires a world switch as a result of an SMC, and the
current SMC context helpers were of little help in this regard. Therefore
this patch makes the following changes:

1. Add the monitor stack pointer, `sp_mon`, to `smc_ctx_t`

The C runtime stack pointer in monitor mode, `sp_mon`, is added to the
SMC context, and the `smc_ctx_t` pointer is cached in `sp_mon` prior
to exit from monitor mode. This makes it easier to retrieve the
context when the next SMC call happens. As a result of this change,
the SMC context helpers no longer depend on the stack to save and
restore the registers.

This aligns it with the context save and restore mechanism in AArch64.
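
To illustrate the idea, here is an editor's sketch in plain C, not code
from this patch: the trimmed-down struct and the `read_sp_mon`/
`write_sp_mon` accessors are hypothetical stand-ins for the full
`smc_ctx_t` in smcc_helpers.h and for the MRS/MSR accesses to the banked
monitor-mode SP.

#include <stdint.h>

typedef uintptr_t u_register_t;

/* Trimmed-down stand-in for the real smc_ctx_t in smcc_helpers.h. */
typedef struct smc_ctx {
	u_register_t r0;
	/* ... r1-r12 and the banked sp/lr/spsr registers ... */
	u_register_t sp_mon;	/* C runtime stack pointer of monitor mode */
	u_register_t lr_mon;
	u_register_t scr;
} smc_ctx_t;

/* Hypothetical accessors modelling the banked monitor-mode SP. */
static u_register_t sp_mon_reg;
static u_register_t read_sp_mon(void) { return sp_mon_reg; }
static void write_sp_mon(u_register_t v) { sp_mon_reg = v; }

/*
 * Exit path (cf. the `monitor_exit` macro): stash the C runtime stack
 * pointer in the context, then leave sp_mon pointing at the context
 * itself before ERET.
 */
static void monitor_exit_sketch(smc_ctx_t *ctx)
{
	ctx->sp_mon = read_sp_mon();
	write_sp_mon((u_register_t)ctx);
	/* ... restore GP and banked registers from *ctx, then eret ... */
}

/*
 * Entry path (cf. handle_smc): on the next SMC, sp_mon already holds
 * the context pointer, so no stack accesses are needed to find it.
 */
static smc_ctx_t *smc_entry_sketch(void)
{
	smc_ctx_t *ctx = (smc_ctx_t *)read_sp_mon();
	write_sp_mon(ctx->sp_mon);	/* back onto the C runtime stack */
	return ctx;
}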

2. Add SCR to `smc_ctx_t`

Adding the SCR register to `smc_ctx_t` makes it easier to manage this
register's state when switching between the non-secure and secure worlds
as a result of an SMC call.
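
Again as an editor's sketch rather than patch code (the one-field context
is hypothetical; `read_scr`/`write_scr` mirror the TF-A helpers of the
same name used elsewhere in this patch), the SCR handling across an SMC
looks like this: the caller's SCR is captured in the context on entry,
the NS bit is cleared while the handler runs, and SCR is restored from
whichever context is exited to, so a world switch is simply an exit
through the other world's context.

#include <stdint.h>

typedef uintptr_t u_register_t;

#define SCR_NS_BIT	(1U << 0)	/* SCR.NS, as defined in arch.h */

/* Hypothetical SCR accessors modelling the ldcopr/stcopr instructions. */
static u_register_t scr_reg;
static u_register_t read_scr(void) { return scr_reg; }
static void write_scr(u_register_t v) { scr_reg = v; }

/* One-field context, standing in for the full smc_ctx_t. */
typedef struct { u_register_t scr; } ctx_sketch_t;

/*
 * On SMC entry: record the caller's SCR, derive the caller's world
 * from SCR.NS, then drop to the secure state to run the handler.
 */
static u_register_t smc_entry(ctx_sketch_t *caller)
{
	caller->scr = read_scr();
	write_scr(caller->scr & ~SCR_NS_BIT);
	return caller->scr & SCR_NS_BIT;	/* flags for the handler */
}

/*
 * On exit: restore SCR from the context being returned to. Exiting
 * through the other world's context performs the world switch.
 */
static void smc_exit(const ctx_sketch_t *next)
{
	write_scr(next->scr);
}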

Change-Id: I5e12a7056107c1701b457b8f7363fdbf892230bf
Signed-off-by: Soby Mathew <soby.mathew@arm.com>
Signed-off-by: dp-arm <dimitris.papastamos@arm.com>
parent d801fbb0
@@ -74,6 +74,7 @@ static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx,
 	next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3);
 	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
 	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+	next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
 }

 /*******************************************************************************
@@ -140,6 +141,28 @@ void bl1_prepare_next_image(unsigned int image_id)
 	copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()),
 		smc_get_next_ctx());

+	/*
+	 * If the next image is non-secure, then we need to program the banked
+	 * non secure sctlr. This is not required when the next image is secure
+	 * because in AArch32, we expect the secure world to have the same
+	 * SCTLR settings.
+	 */
+	if (security_state == NON_SECURE) {
+		cpu_context_t *ctx = cm_get_context(security_state);
+		u_register_t ns_sctlr;
+
+		/* Temporarily set the NS bit to access NS SCTLR */
+		write_scr(read_scr() | SCR_NS_BIT);
+		isb();
+		ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+		write_sctlr(ns_sctlr);
+		isb();
+
+		write_scr(read_scr() & ~SCR_NS_BIT);
+		isb();
+	}
+
 	/*
 	 * Flush the SMC & CPU context and the (next)pointers,
 	 * to access them after caches are disabled.
...
@@ -81,20 +81,11 @@ func bl1_entrypoint
 	dsb	sy
 	isb

-	/* Get the cpu_context for next BL image */
-	bl	cm_get_next_context
-
-	/* Restore the SCR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-	stcopr	r2, SCR
-	isb
-
 	/*
 	 * Get the smc_context for next BL image,
 	 * program the gp/system registers and exit
 	 * secure monitor mode
 	 */
 	bl	smc_get_next_ctx
-	smcc_restore_gp_mode_regs
-	eret
+	monitor_exit
 endfunc bl1_entrypoint
@@ -115,21 +115,10 @@ func sp_min_entrypoint
 	sub	r1, r1, r0
 	bl	clean_dcache_range

-	/* Program the registers in cpu_context and exit monitor mode */
-	mov	r0, #NON_SECURE
-	bl	cm_get_context
-
-	/* Restore the SCR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-	stcopr	r2, SCR
-	isb
-
-	/* Restore the SCTLR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
-	stcopr	r2, SCTLR
-
 	bl	smc_get_next_ctx
-	/* The other cpu_context registers have been copied to smc context */
+	/* r0 points to `smc_ctx_t` */
+	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
 	b	sp_min_exit
 endfunc sp_min_entrypoint
@@ -138,46 +127,44 @@ endfunc sp_min_entrypoint
  * SMC handling function for SP_MIN.
  */
 func handle_smc
-	smcc_save_gp_mode_regs
+	/* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+	str	lr, [sp, #SMC_CTX_LR_MON]

-	/* r0 points to smc_context */
-	mov	r2, r0				/* handle */
-	ldcopr	r0, SCR
+	smcc_save_gp_mode_regs

 	/*
-	 * Save SCR in stack. r1 is pushed to meet the 8 byte
-	 * stack alignment requirement.
+	 * `sp` still points to `smc_ctx_t`. Save it to a register
+	 * and restore the C runtime stack pointer to `sp`.
 	 */
-	push	{r0, r1}
+	mov	r2, sp				/* handle */
+	ldr	sp, [r2, #SMC_CTX_SP_MON]
+
+	ldr	r0, [r2, #SMC_CTX_SCR]
 	and	r3, r0, #SCR_NS_BIT		/* flags */

 	/* Switch to Secure Mode*/
 	bic	r0, #SCR_NS_BIT
 	stcopr	r0, SCR
 	isb
 	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
 	/* Check whether an SMC64 is issued */
 	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
-	beq	1f	/* SMC32 is detected */
+	beq	1f
+	/* SMC32 is not detected. Return error back to caller */
 	mov	r0, #SMC_UNK
 	str	r0, [r2, #SMC_CTX_GPREG_R0]
 	mov	r0, r2
-	b	2f	/* Skip handling the SMC */
+	b	sp_min_exit
 1:
+	/* SMC32 is detected */
 	mov	r1, #0				/* cookie */
 	bl	handle_runtime_svc
-2:
-	/* r0 points to smc context */

-	/* Restore SCR from stack */
-	pop	{r1, r2}
-	stcopr	r1, SCR
-	isb
-
+	/* `r0` points to `smc_ctx_t` */
 	b	sp_min_exit
 endfunc handle_smc

 /*
  * The Warm boot entrypoint for SP_MIN.
  */
@@ -234,23 +221,9 @@ func sp_min_warm_entrypoint
 #endif

 	bl	sp_min_warm_boot

-	/* Program the registers in cpu_context and exit monitor mode */
-	mov	r0, #NON_SECURE
-	bl	cm_get_context
-
-	/* Restore the SCR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
-	stcopr	r2, SCR
-	isb
-
-	/* Restore the SCTLR */
-	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
-	stcopr	r2, SCTLR
-
 	bl	smc_get_next_ctx
-	/* The other cpu_context registers have been copied to smc context */
+	/* r0 points to `smc_ctx_t` */
+	/* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
 	b	sp_min_exit
 endfunc sp_min_warm_entrypoint
@@ -261,6 +234,5 @@ endfunc sp_min_warm_entrypoint
  * Arguments : r0 must point to the SMC context to restore from.
  */
 func sp_min_exit
-	smcc_restore_gp_mode_regs
-	eret
+	monitor_exit
 endfunc sp_min_exit
@@ -101,6 +101,7 @@ static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
 	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
 	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
 	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+	next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
 }

 /*******************************************************************************
@@ -111,6 +112,8 @@ static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
 static void sp_min_prepare_next_image_entry(void)
 {
 	entry_point_info_t *next_image_info;
+	cpu_context_t *ctx = cm_get_context(NON_SECURE);
+	u_register_t ns_sctlr;

 	/* Program system registers to proceed to non-secure */
 	next_image_info = sp_min_plat_get_bl33_ep_info();
@@ -125,6 +128,16 @@ static void sp_min_prepare_next_image_entry(void)
 	/* Copy r0, lr and spsr from cpu context to SMC context */
 	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
 			smc_get_next_ctx());
+
+	/* Temporarily set the NS bit to access NS SCTLR */
+	write_scr(read_scr() | SCR_NS_BIT);
+	isb();
+	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+	write_sctlr(ns_sctlr);
+	isb();
+
+	write_scr(read_scr() & ~SCR_NS_BIT);
+	isb();
 }

 /******************************************************************************
...
@@ -18,8 +18,10 @@
 #define SMC_CTX_GPREG_R5	0x14
 #define SMC_CTX_SP_USR		0x34
 #define SMC_CTX_SPSR_MON	0x78
-#define SMC_CTX_LR_MON		0x7C
-#define SMC_CTX_SIZE		0x80
+#define SMC_CTX_SP_MON		0x7C
+#define SMC_CTX_LR_MON		0x80
+#define SMC_CTX_SCR		0x84
+#define SMC_CTX_SIZE		0x88

 #ifndef __ASSEMBLY__
 #include <cassert.h>
@@ -63,8 +65,14 @@ typedef struct smc_ctx {
 	u_register_t sp_und;
 	u_register_t lr_und;
 	u_register_t spsr_mon;
-	/* No need to save 'sp_mon' because we are already in monitor mode */
+	/*
+	 * `sp_mon` will point to the C runtime stack in monitor mode. But
+	 * prior to exit from SMC, this will point to the `smc_ctx_t` so that
+	 * on next entry due to SMC, the `smc_ctx_t` can be easily accessed.
+	 */
+	u_register_t sp_mon;
 	u_register_t lr_mon;
+	u_register_t scr;
 } smc_ctx_t;

 /*
...
@@ -9,27 +9,16 @@
 #include <arch.h>

 /*
- * Macro to save the General purpose registers including the banked
- * registers to the SMC context on entry due a SMC call. On return, r0
- * contains the pointer to the `smc_context_t`.
+ * Macro to save the General purpose registers (r0 - r12), the banked
+ * spsr, lr, sp registers and the `scr` register to the SMC context on entry
+ * due a SMC call. The `lr` of the current mode (monitor) is expected to be
+ * already saved. The `sp` must point to the `smc_ctx_t` to save to.
 */
 .macro smcc_save_gp_mode_regs
-	push	{r0-r4, lr}
-
-	ldcopr	r0, SCR
-	and	r0, r0, #SCR_NS_BIT
-	bl	smc_get_ctx
-
-	/* Save r5 - r12 in the SMC context */
-	add	r1, r0, #SMC_CTX_GPREG_R5
-	stm	r1!, {r5-r12}
-
-	/*
-	 * Pop r0 - r4, lr to r4 - r8, lr from stack and then save
-	 * it to SMC context.
-	 */
-	pop	{r4-r8, lr}
-	stm	r0, {r4-r8}
+	/* Save r0 - r12 in the SMC context */
+	stm	sp, {r0-r12}
+	mov	r0, sp
+	add	r0, r0, #SMC_CTX_SP_USR

 	/* Save the banked registers including the current SPSR and LR */
 	mrs	r4, sp_usr
@@ -41,7 +30,7 @@
 	mrs	r10, sp_fiq
 	mrs	r11, lr_fiq
 	mrs	r12, spsr_svc
-	stm	r1!, {r4-r12}
+	stm	r0!, {r4-r12}

 	mrs	r4, sp_svc
 	mrs	r5, lr_svc
@@ -52,18 +41,36 @@
 	mrs	r10, sp_und
 	mrs	r11, lr_und
 	mrs	r12, spsr
-	stm	r1!, {r4-r12, lr}
+	stm	r0!, {r4-r12}
+
+	/* lr_mon is already saved by caller */
+
+	ldcopr	r4, SCR
+	str	r4, [sp, #SMC_CTX_SCR]
 .endm

 /*
- * Macro to restore the General purpose registers including the banked
- * registers from the SMC context prior to exit from the SMC call.
- * r0 must point to the `smc_context_t` to restore from.
+ * Macro to restore the `smc_ctx_t`, which includes the General purpose
+ * registers and banked mode registers, and exit from the monitor mode.
+ * r0 must point to the `smc_ctx_t` to restore from.
 */
-.macro smcc_restore_gp_mode_regs
+.macro monitor_exit
+	/*
+	 * Save the current sp and restore the smc context
+	 * pointer to sp which will be used for handling the
+	 * next SMC.
+	 */
+	str	sp, [r0, #SMC_CTX_SP_MON]
+	mov	sp, r0
+
+	/*
+	 * Restore SCR first so that we access the right banked register
+	 * when the other mode registers are restored.
+	 */
+	ldr	r1, [r0, #SMC_CTX_SCR]
+	stcopr	r1, SCR
+	isb

-	/* Restore the banked registers including the current SPSR and LR */
+	/* Restore the banked registers including the current SPSR */
 	add	r1, r0, #SMC_CTX_SP_USR
 	ldm	r1!, {r4-r12}
 	msr	sp_usr, r4
@@ -76,7 +83,7 @@
 	msr	lr_fiq, r11
 	msr	spsr_svc, r12

-	ldm	r1!, {r4-r12, lr}
+	ldm	r1!, {r4-r12}
 	msr	sp_svc, r4
 	msr	lr_svc, r5
 	msr	spsr_abt, r6
@@ -93,8 +100,12 @@
 	 */
 	msr	spsr_fsxc, r12

+	/* Restore the LR */
+	ldr	lr, [r0, #SMC_CTX_LR_MON]
+
 	/* Restore the rest of the general purpose registers */
 	ldm	r0, {r0-r12}
+	eret
 .endm

 #endif /* __SMCC_MACROS_S__ */