Commit ae030052 authored by Olivier Deprez, committed by TrustedFirmware Code Review

Merge changes from topic "od/ffa_spmc_pwr" into integration

* changes:
  SPM: declare third cactus instance as UP SP
  SPMD: lock the g_spmd_pm structure
  FF-A: implement FFA_SECONDARY_EP_REGISTER
parents 332649da e96fc8e7
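The new FFA_SECONDARY_EP_REGISTER ABI lets the SPMC hand the SPMD a single secondary cold-boot entry point, replacing the per-core SPMD_DIRECT_MSG_SET_ENTRY_POINT message removed further down. A minimal sketch of the caller's side is shown below; it reuses the function IDs added to ffa_svc.h in this patch, but the smc_call() conduit and the entry-point symbol are placeholders, not part of the change.

#include <stdint.h>

/* Illustrative SMC conduit; a real SPMC would use its own helper. */
struct smc_ret { uint64_t x0, x1, x2, x3; };
struct smc_ret smc_call(uint64_t fid, uint64_t x1, uint64_t x2,
			uint64_t x3, uint64_t x4);

/* Secondary cold-boot entry point inside the SPMC image (placeholder). */
void spmc_secondary_entry(void);

static int spmc_register_secondary_ep(void)
{
	/*
	 * x1 carries the entry point address; the SPMD rejects it unless it
	 * lies within the SPMC binary image (see spmd_pm_secondary_ep_register).
	 */
	struct smc_ret ret = smc_call(FFA_SECONDARY_EP_REGISTER_SMC64,
				      (uintptr_t)&spmc_secondary_entry,
				      0U, 0U, 0U);

	/* On failure the SPMD returns FFA_ERROR with the reason in w2. */
	return (ret.x0 == FFA_SUCCESS_SMC64) ? 0 : (int)ret.x2;
}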
/*
* Copyright (c) 2020, Arm Limited. All rights reserved.
* Copyright (c) 2020-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -22,7 +22,7 @@
/* The macros below are used to identify FFA calls from the SMC function ID */
#define FFA_FNUM_MIN_VALUE U(0x60)
#define FFA_FNUM_MAX_VALUE U(0x7f)
#define FFA_FNUM_MAX_VALUE U(0x84)
#define is_ffa_fid(fid) __extension__ ({ \
__typeof__(fid) _fid = (fid); \
((GET_SMC_NUM(_fid) >= FFA_FNUM_MIN_VALUE) && \
@@ -85,6 +85,7 @@
#define FFA_FNUM_MEM_RETRIEVE_RESP U(0x75)
#define FFA_FNUM_MEM_RELINQUISH U(0x76)
#define FFA_FNUM_MEM_RECLAIM U(0x77)
#define FFA_FNUM_SECONDARY_EP_REGISTER U(0x84)
/* FFA SMC32 FIDs */
#define FFA_ERROR FFA_FID(SMC_32, FFA_FNUM_ERROR)
@@ -116,6 +117,7 @@
#define FFA_MEM_RECLAIM FFA_FID(SMC_32, FFA_FNUM_MEM_RECLAIM)
/* FFA SMC64 FIDs */
#define FFA_ERROR_SMC64 FFA_FID(SMC_64, FFA_FNUM_ERROR)
#define FFA_SUCCESS_SMC64 FFA_FID(SMC_64, FFA_FNUM_SUCCESS)
#define FFA_RXTX_MAP_SMC64 FFA_FID(SMC_64, FFA_FNUM_RXTX_MAP)
#define FFA_MSG_SEND_DIRECT_REQ_SMC64 \
@@ -127,6 +129,8 @@
#define FFA_MEM_SHARE_SMC64 FFA_FID(SMC_64, FFA_FNUM_MEM_SHARE)
#define FFA_MEM_RETRIEVE_REQ_SMC64 \
FFA_FID(SMC_64, FFA_FNUM_MEM_RETRIEVE_REQ)
#define FFA_SECONDARY_EP_REGISTER_SMC64 \
FFA_FID(SMC_64, FFA_FNUM_SECONDARY_EP_REGISTER)
/*
* Reserve a special value for traffic targeted to the Hypervisor or SPM.
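Bumping FFA_FNUM_MAX_VALUE from U(0x7f) to U(0x84) matters because is_ffa_fid() gates which SMC function IDs ever reach the SPM dispatcher; without it, the new function number 0x84 would be filtered out before dispatch. A rough sketch of that routing follows, reusing the macros above; the handler name and the SMC_RET1/SMC_UNK fallback mirror common TF-A usage but are assumptions here.

static uintptr_t std_svc_route_sketch(uint32_t smc_fid, u_register_t x1,
				      u_register_t x2, u_register_t x3,
				      u_register_t x4, void *cookie,
				      void *handle, u_register_t flags)
{
	if (is_ffa_fid(smc_fid)) {
		/*
		 * Every FF-A FID, including FFA_SECONDARY_EP_REGISTER_SMC64,
		 * is forwarded to the SPM dispatcher.
		 */
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	SMC_RET1(handle, SMC_UNK);	/* anything else is an unknown SMC */
}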
@@ -47,7 +47,7 @@
is_ffa_partition;
debug_name = "cactus-tertiary";
load_address = <0x7200000>;
vcpu_count = <8>;
vcpu_count = <1>;
mem_size = <1048576>;
};
};
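Dropping vcpu_count from 8 to 1 declares the third cactus instance as a uniprocessor (UP) secure partition: the SPMC instantiates a single execution context for it rather than one per physical core. A hypothetical helper showing how a manifest parser might make that distinction is sketched below; only the vcpu_count property comes from the fragment above, the dtb/node handling and the helper name are assumptions.

#include <stdbool.h>
#include <libfdt.h>

/* Hypothetical: classify a secure partition node as UP or MP. */
static bool sp_is_uniprocessor(const void *dtb, int sp_node)
{
	int len;
	const fdt32_t *prop = fdt_getprop(dtb, sp_node, "vcpu_count", &len);

	if ((prop == NULL) || (len != (int)sizeof(fdt32_t))) {
		return false;	/* treat a missing/malformed property as MP */
	}

	/* vcpu_count = <1> means a single execution context for the SP. */
	return fdt32_to_cpu(*prop) == 1U;
}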
/*
* Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -164,7 +164,6 @@ static int32_t spmd_init(void)
for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
if (core_id != linear_id) {
spm_core_context[core_id].state = SPMC_STATE_OFF;
spm_core_context[core_id].secondary_ep.entry_point = 0UL;
}
}
@@ -406,13 +405,6 @@ static int spmd_handle_spmc_message(unsigned long long msg,
VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
msg, parm1, parm2, parm3, parm4);
switch (msg) {
case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
default:
break;
}
return -EINVAL;
}
@@ -429,6 +421,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
unsigned int linear_id = plat_my_core_pos();
spmd_spm_core_context_t *ctx = spmd_get_context();
bool secure_origin;
int32_t ret;
@@ -437,10 +430,12 @@ uint64_t spmd_smc_handler(uint32_t smc_fid,
/* Determine which security state this SMC originated from */
secure_origin = is_caller_secure(flags);
INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
VERBOSE("SPM(%u): 0x%x 0x%llx 0x%llx 0x%llx 0x%llx "
"0x%llx 0x%llx 0x%llx\n",
linear_id, smc_fid, x1, x2, x3, x4,
SMC_GET_GP(handle, CTX_GPREG_X5),
SMC_GET_GP(handle, CTX_GPREG_X6),
SMC_GET_GP(handle, CTX_GPREG_X7));
switch (smc_fid) {
case FFA_ERROR:
@@ -533,6 +528,28 @@ uint64_t spmd_smc_handler(uint32_t smc_fid,
break; /* not reached */
case FFA_SECONDARY_EP_REGISTER_SMC64:
if (secure_origin) {
ret = spmd_pm_secondary_ep_register(x1);
if (ret < 0) {
SMC_RET8(handle, FFA_ERROR_SMC64,
FFA_TARGET_INFO_MBZ, ret,
FFA_PARAM_MBZ, FFA_PARAM_MBZ,
FFA_PARAM_MBZ, FFA_PARAM_MBZ,
FFA_PARAM_MBZ);
} else {
SMC_RET8(handle, FFA_SUCCESS_SMC64,
FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
FFA_PARAM_MBZ, FFA_PARAM_MBZ,
FFA_PARAM_MBZ, FFA_PARAM_MBZ,
FFA_PARAM_MBZ);
}
}
return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
break; /* Not reached */
case FFA_MSG_SEND_DIRECT_REQ_SMC32:
if (secure_origin && spmd_is_spmc_message(x1)) {
ret = spmd_handle_spmc_message(x3, x4,
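Note the two failure paths in the new case above: a secure caller that passes a bad entry point gets FFA_ERROR_SMC64 with the error code in w2, while any non-secure caller falls through to spmd_ffa_error_return() with FFA_ERROR_NOT_SUPPORTED. That helper pre-dates this patch; its shape is roughly as follows (sketch for reference, the exact body may differ):

uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	/* Pack the error code into w2 of an FFA_ERROR response. */
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}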
/*
* Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,8 +7,15 @@
#include <assert.h>
#include <errno.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/spinlock.h>
#include "spmd_private.h"
static struct {
bool secondary_ep_locked;
uintptr_t secondary_ep;
spinlock_t lock;
} g_spmd_pm;
/*******************************************************************************
* spmd_build_spmc_message
*
@@ -25,16 +32,16 @@ static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long messag
}
/*******************************************************************************
* spmd_pm_secondary_core_set_ep
* spmd_pm_secondary_ep_register
******************************************************************************/
int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
uintptr_t entry_point, unsigned long long context)
int spmd_pm_secondary_ep_register(uintptr_t entry_point)
{
int id = plat_core_pos_by_mpidr(mpidr);
int ret = FFA_ERROR_INVALID_PARAMETER;
if ((id < 0) || ((unsigned int)id >= PLATFORM_CORE_COUNT)) {
ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
return -EINVAL;
spin_lock(&g_spmd_pm.lock);
if (g_spmd_pm.secondary_ep_locked == true) {
goto out;
}
/*
@@ -42,27 +49,22 @@ int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
* load_address <= entry_point < load_address + binary_size
*/
if (!spmd_check_address_in_binary_image(entry_point)) {
ERROR("%s entry point is not within image boundaries (%llx)\n",
__func__, mpidr);
return -EINVAL;
ERROR("%s entry point is not within image boundaries\n",
__func__);
goto out;
}
spmd_spm_core_context_t *ctx = spmd_get_context_by_mpidr(mpidr);
spmd_pm_secondary_ep_t *secondary_ep = &ctx->secondary_ep;
if (secondary_ep->locked) {
ERROR("%s entry locked (%llx)\n", __func__, mpidr);
return -EINVAL;
}
g_spmd_pm.secondary_ep = entry_point;
g_spmd_pm.secondary_ep_locked = true;
/* Fill new entry to corresponding secondary core id and lock it */
secondary_ep->entry_point = entry_point;
secondary_ep->context = context;
secondary_ep->locked = true;
VERBOSE("%s %lx\n", __func__, entry_point);
VERBOSE("%s %d %llx %lx %llx\n",
__func__, id, mpidr, entry_point, context);
ret = 0;
return 0;
out:
spin_unlock(&g_spmd_pm.lock);
return ret;
}
/*******************************************************************************
@@ -82,18 +84,20 @@ static void spmd_cpu_on_finish_handler(u_register_t unused)
assert(ctx->state != SPMC_STATE_ON);
assert(spmc_ep_info != NULL);
spin_lock(&g_spmd_pm.lock);
/*
* TODO: this might require locking the spmc_ep_info structure,
* or provisioning one structure per cpu
* Leave the possibility that the SPMC does not call
* FFA_SECONDARY_EP_REGISTER in which case re-use the
* primary core address for booting secondary cores.
*/
if (ctx->secondary_ep.entry_point == 0UL) {
goto exit;
if (g_spmd_pm.secondary_ep_locked == true) {
spmc_ep_info->pc = g_spmd_pm.secondary_ep;
}
spmc_ep_info->pc = ctx->secondary_ep.entry_point;
spin_unlock(&g_spmd_pm.lock);
cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);
write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
ctx->secondary_ep.context);
/* Mark CPU as initiating ON operation */
ctx->state = SPMC_STATE_ON_PENDING;
@@ -106,7 +110,6 @@ static void spmd_cpu_on_finish_handler(u_register_t unused)
return;
}
exit:
ctx->state = SPMC_STATE_ON;
VERBOSE("CPU %u on!\n", linear_id);
@@ -124,10 +127,6 @@ static int32_t spmd_cpu_off_handler(u_register_t unused)
assert(ctx != NULL);
assert(ctx->state != SPMC_STATE_OFF);
if (ctx->secondary_ep.entry_point == 0UL) {
goto exit;
}
/* Build an SPMD to SPMC direct message request. */
spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);
@@ -136,9 +135,15 @@ static int32_t spmd_cpu_off_handler(u_register_t unused)
ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
}
/* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */
/* Expect a direct message response from the SPMC. */
u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
CTX_GPREG_X0);
if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
ERROR("%s invalid SPMC response (%lx).\n",
__func__, ffa_resp_func);
return -EINVAL;
}
exit:
ctx->state = SPMC_STATE_OFF;
VERBOSE("CPU %u off!\n", linear_id);
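The CPU_OFF path now checks that the SPMC actually answered the SPMD's framework message with a direct message response. For illustration only, an SPMC would be expected to behave roughly as below when it receives the PSCI_CPU_OFF request built by spmd_build_spmc_message(); the helper names are placeholders, not real SPMC APIs.

#include <stdint.h>

/* Placeholders, not real SPMC APIs. */
void spmc_cpu_off_teardown(void);
uint16_t spmc_own_id(void);
void ffa_msg_send_direct_resp(uint16_t src, uint16_t dst,
			      uint64_t a0, uint64_t a1, uint64_t a2);

/* Hypothetical SPMC-side handling of the SPMD's PSCI_CPU_OFF request. */
static void spmc_handle_spmd_cpu_off(uint16_t spmd_id)
{
	/* SPMC-specific per-CPU teardown work. */
	spmc_cpu_off_teardown();

	/*
	 * Reply on the same conduit so that spmd_cpu_off_handler() reads
	 * FFA_MSG_SEND_DIRECT_RESP_SMC32 back in x0; anything else now makes
	 * the handler fail with -EINVAL.
	 */
	ffa_msg_send_direct_resp(spmc_own_id(), spmd_id, 0U, 0U, 0U);
}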
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2019-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -42,12 +42,6 @@ typedef enum spmc_state {
SPMC_STATE_ON
} spmc_state_t;
typedef struct spmd_pm_secondary_ep {
uintptr_t entry_point;
uintptr_t context;
bool locked;
} spmd_pm_secondary_ep_t;
/*
* Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
* the SPM core (SPMC) at the next lower EL.
@@ -56,7 +50,6 @@ typedef struct spmd_spm_core_context {
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
spmc_state_t state;
spmd_pm_secondary_ep_t secondary_ep;
} spmd_spm_core_context_t;
/*
@@ -69,7 +62,6 @@ typedef struct spmd_spm_core_context {
#define SPMC_SECURE_ID_SHIFT U(15)
#define SPMD_DIRECT_MSG_ENDPOINT_ID U(FFA_ENDPOINT_ID_MAX - 1)
#define SPMD_DIRECT_MSG_SET_ENTRY_POINT U(1)
/* Functions used to enter/exit SPMC synchronously */
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
@@ -94,8 +86,7 @@ spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr);
/* SPMC context on current CPU get helper */
spmd_spm_core_context_t *spmd_get_context(void);
int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
uintptr_t entry_point, unsigned long long context);
int spmd_pm_secondary_ep_register(uintptr_t entry_point);
bool spmd_check_address_in_binary_image(uint64_t address);
#endif /* __ASSEMBLER__ */