Commit dae374bf — authored by Anthony Zhou, committed by Varun Wadekar
Browse files

spd: trusty: pass VMID via X7



According to the ARM DEN0028A spec, the hypervisor ID (VMID) should be stored
in x7 (or w7). This patch reads this value from the non-secure context and
passes it to Trusty. In order to do so, introduce new macros to pass five to
eight parameters to the Trusted OS.

Change-Id: I101cf45d0712e1e880466b2274f9a48af755c9fa
Signed-off-by: Anthony Zhou <anzhou@nvidia.com>
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
parent 32bf0e29
...@@ -56,6 +56,22 @@ ...@@ -56,6 +56,22 @@
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
SMC_RET3(_h, (_x0), (_x1), (_x2)); \ SMC_RET3(_h, (_x0), (_x1), (_x2)); \
} }
/*
 * Extended SMC return helpers: hand five to eight values back to the
 * caller. x4-x7 are stored into the saved GP-register context of the
 * handle `_h` via write_ctx_reg(); x0-x3 travel through the existing
 * SMC_RET4 path. Each variant is written flat (all context writes,
 * then one SMC_RET4) rather than nesting the smaller variants.
 */
#define SMC_RET5(_h, _r0, _r1, _r2, _r3, _r4) { \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X4, (_r4)); \
	SMC_RET4(_h, (_r0), (_r1), (_r2), (_r3)); \
}
#define SMC_RET6(_h, _r0, _r1, _r2, _r3, _r4, _r5) { \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X4, (_r4)); \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X5, (_r5)); \
	SMC_RET4(_h, (_r0), (_r1), (_r2), (_r3)); \
}
#define SMC_RET7(_h, _r0, _r1, _r2, _r3, _r4, _r5, _r6) { \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X4, (_r4)); \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X5, (_r5)); \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X6, (_r6)); \
	SMC_RET4(_h, (_r0), (_r1), (_r2), (_r3)); \
}
#define SMC_RET8(_h, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) { \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X4, (_r4)); \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X5, (_r5)); \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X6, (_r6)); \
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X7, (_r7)); \
	SMC_RET4(_h, (_r0), (_r1), (_r2), (_r3)); \
}
/* /*
* Convenience macros to access general purpose registers using handle provided * Convenience macros to access general purpose registers using handle provided
......
...@@ -28,7 +28,8 @@ ...@@ -28,7 +28,8 @@
* POSSIBILITY OF SUCH DAMAGE. * POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <assert.h> #include <arch_helpers.h>
#include <assert.h> /* for context_mgmt.h */
#include <bl_common.h> #include <bl_common.h>
#include <bl31.h> #include <bl31.h>
#include <context_mgmt.h> #include <context_mgmt.h>
...@@ -41,6 +42,9 @@ ...@@ -41,6 +42,9 @@
#include "smcall.h" #include "smcall.h"
#include "sm_err.h" #include "sm_err.h"
/*
 * Mask of HCR_EL2 bits used to decide that a hypervisor is active in the
 * non-secure world (tested by is_hypervisor_mode() below). 0x286001 sets
 * bits 0, 13, 14, 19 and 21 — presumably VM, TWI, TWE, TSC and TACR;
 * NOTE(review): confirm the bit names against the ARM ARM HCR_EL2
 * description.
 */
#define HYP_ENABLE_FLAG 0x286001
struct trusty_stack { struct trusty_stack {
uint8_t space[PLATFORM_STACK_SIZE] __aligned(16); uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
}; };
...@@ -65,31 +69,58 @@ struct args { ...@@ -65,31 +69,58 @@ struct args {
uint64_t r1; uint64_t r1;
uint64_t r2; uint64_t r2;
uint64_t r3; uint64_t r3;
uint64_t r4;
uint64_t r5;
uint64_t r6;
uint64_t r7;
}; };
struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT]; struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
struct args trusty_init_context_stack(void **sp, void *new_stack); struct args trusty_init_context_stack(void **sp, void *new_stack);
struct args trusty_context_switch_helper(void **sp, uint64_t r0, uint64_t r1, struct args trusty_context_switch_helper(void **sp, void *smc_params);
uint64_t r2, uint64_t r3);
/* Return the per-CPU Trusty context for the calling core, indexed by
 * plat_my_core_pos(). NOTE(review): the side-by-side duplication on the
 * lines below is an artifact of the scraped old/new diff columns, not
 * real code duplication — restore from the repository before building. */
static struct trusty_cpu_ctx *get_trusty_ctx(void) static struct trusty_cpu_ctx *get_trusty_ctx(void)
{ {
return &trusty_cpu_ctx[plat_my_core_pos()]; return &trusty_cpu_ctx[plat_my_core_pos()];
} }
/*
 * Report whether the non-secure world runs under a hypervisor, by
 * checking the HYP_ENABLE_FLAG bits of HCR_EL2 (read via read_hcr()).
 * Returns 1 if any of those bits is set, 0 otherwise.
 */
static uint32_t is_hypervisor_mode(void)
{
	return (read_hcr() & HYP_ENABLE_FLAG) != 0U ? 1U : 0U;
}
static struct args trusty_context_switch(uint32_t security_state, uint64_t r0, static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
uint64_t r1, uint64_t r2, uint64_t r3) uint64_t r1, uint64_t r2, uint64_t r3)
{ {
struct args ret; struct args ret;
struct trusty_cpu_ctx *ctx = get_trusty_ctx(); struct trusty_cpu_ctx *ctx = get_trusty_ctx();
struct trusty_cpu_ctx *ctx_smc;
assert(ctx->saved_security_state != security_state); assert(ctx->saved_security_state != security_state);
ret.r7 = 0;
if (is_hypervisor_mode()) {
/* According to the ARM DEN0028A spec, VMID is stored in x7 */
ctx_smc = cm_get_context(NON_SECURE);
assert(ctx_smc);
ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
}
/* r4, r5, r6 reserved for future use. */
ret.r6 = 0;
ret.r5 = 0;
ret.r4 = 0;
ret.r3 = r3;
ret.r2 = r2;
ret.r1 = r1;
ret.r0 = r0;
cm_el1_sysregs_context_save(security_state); cm_el1_sysregs_context_save(security_state);
ctx->saved_security_state = security_state; ctx->saved_security_state = security_state;
ret = trusty_context_switch_helper(&ctx->saved_sp, r0, r1, r2, r3); ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);
assert(ctx->saved_security_state == !security_state); assert(ctx->saved_security_state == !security_state);
...@@ -204,7 +235,8 @@ static uint64_t trusty_smc_handler(uint32_t smc_fid, ...@@ -204,7 +235,8 @@ static uint64_t trusty_smc_handler(uint32_t smc_fid,
if (is_caller_secure(flags)) { if (is_caller_secure(flags)) {
if (smc_fid == SMC_SC_NS_RETURN) { if (smc_fid == SMC_SC_NS_RETURN) {
ret = trusty_context_switch(SECURE, x1, 0, 0, 0); ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
SMC_RET4(handle, ret.r0, ret.r1, ret.r2, ret.r3); SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
ret.r4, ret.r5, ret.r6, ret.r7);
} }
INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \ INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
cpu %d, unknown smc\n", cpu %d, unknown smc\n",
...@@ -231,6 +263,7 @@ static int32_t trusty_init(void) ...@@ -231,6 +263,7 @@ static int32_t trusty_init(void)
{ {
void el3_exit(void); void el3_exit(void);
entry_point_info_t *ep_info; entry_point_info_t *ep_info;
struct args zero_args = {0};
struct trusty_cpu_ctx *ctx = get_trusty_ctx(); struct trusty_cpu_ctx *ctx = get_trusty_ctx();
uint32_t cpu = plat_my_core_pos(); uint32_t cpu = plat_my_core_pos();
int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx), int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
...@@ -264,7 +297,7 @@ static int32_t trusty_init(void) ...@@ -264,7 +297,7 @@ static int32_t trusty_init(void)
ctx->saved_security_state = ~0; /* initial saved state is invalid */ ctx->saved_security_state = ~0; /* initial saved state is invalid */
trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack); trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack);
trusty_context_switch_helper(&ctx->saved_sp, 0, 0, 0, 0); trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
cm_el1_sysregs_context_restore(NON_SECURE); cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE); cm_set_next_eret_context(NON_SECURE);
......
...@@ -60,8 +60,20 @@ func trusty_context_switch_helper ...@@ -60,8 +60,20 @@ func trusty_context_switch_helper
pop x21, x22 pop x21, x22
pop x19, x20 pop x19, x20
pop x8, xzr pop x8, xzr
stp x1, x2, [x8]
stp x3, x4, [x8, #16] ldr x2, [x1]
ldr x3, [x1, #0x08]
ldr x4, [x1, #0x10]
ldr x5, [x1, #0x18]
ldr x6, [x1, #0x20]
ldr x7, [x1, #0x28]
ldr x10, [x1, #0x30]
ldr x11, [x1, #0x38]
stp x2, x3, [x8]
stp x4, x5, [x8, #16]
stp x6, x7, [x8, #32]
stp x10, x11, [x8, #48]
ret ret
endfunc trusty_context_switch_helper endfunc trusty_context_switch_helper
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment