/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <lib/el3_runtime/context_mgmt.h>

#include "spmd_private.h"
/*******************************************************************************
 * spmd_build_spmc_message
 *
 * Builds an SPMD to SPMC direct message request by populating the saved
 * GP register context (x0-x3) per the FF-A direct messaging ABI.
 ******************************************************************************/
static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
{
	/* x1 carries sender (SPMD) in the upper halfword, receiver (SPMC) below. */
	unsigned long long src_dst =
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		spmd_spmc_id_get();

	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1, src_dst);
	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
/*******************************************************************************
 * spmd_pm_secondary_core_set_ep
 ******************************************************************************/
30
31
int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
		uintptr_t entry_point, unsigned long long context)
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
{
	int id = plat_core_pos_by_mpidr(mpidr);

	if ((id < 0) || (id >= PLATFORM_CORE_COUNT)) {
		ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
		return -EINVAL;
	}

	/*
	 * Check entry_point address is a PA within
	 * load_address <= entry_point < load_address + binary_size
	 */
	if (!spmd_check_address_in_binary_image(entry_point)) {
		ERROR("%s entry point is not within image boundaries (%llx)\n",
		      __func__, mpidr);
		return -EINVAL;
	}

50
51
52
53
54
55
56
	spmd_spm_core_context_t *ctx = spmd_get_context_by_mpidr(mpidr);
	spmd_pm_secondary_ep_t *secondary_ep = &ctx->secondary_ep;
	if (secondary_ep->locked) {
		ERROR("%s entry locked (%llx)\n", __func__, mpidr);
		return -EINVAL;
	}

57
	/* Fill new entry to corresponding secondary core id and lock it */
58
59
60
	secondary_ep->entry_point = entry_point;
	secondary_ep->context = context;
	secondary_ep->locked = true;
61
62
63
64
65
66
67

	VERBOSE("%s %d %llx %lx %llx\n",
		__func__, id, mpidr, entry_point, context);

	return 0;
}
/*******************************************************************************
 * This CPU has been turned on. Enter SPMC to initialise S-EL1 or S-EL2. As part
 * of the SPMC initialization path, they will initialize any SPs that they
 * manage. Entry into SPMC is done after initialising minimal architectural
 * state that guarantees safe execution.
 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
	entry_point_info_t *spmc_ep_info = spmd_spmc_ep_info_get();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_ON);
	assert(spmc_ep_info != NULL);

	/*
	 * TODO: this might require locking the spmc_ep_info structure,
	 * or provisioning one structure per cpu
	 */
	if (ctx->secondary_ep.entry_point == 0UL) {
		/* No secondary entry point registered; skip SPMC entry. */
		goto exit;
	}

	/* Redirect the warm boot to the registered secondary entry point. */
	spmc_ep_info->pc = ctx->secondary_ep.entry_point;
	cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);

	/* Pass the registered context argument to the SPMC in x0. */
	write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
		      ctx->secondary_ep.context);

	/* Mark CPU as initiating ON operation */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc,
			linear_id);
		ctx->state = SPMC_STATE_OFF;
		return;
	}

exit:
	ctx->state = SPMC_STATE_ON;

	VERBOSE("CPU %u on!\n", linear_id);
}

/*******************************************************************************
 * spmd_cpu_off_handler
 *
 * This CPU is being turned off. If a secondary entry point was registered,
 * notify the SPMC with a PSCI_CPU_OFF direct message request and wait for it
 * to return, then mark the per-CPU SPMC context as OFF. Always returns 0.
 ******************************************************************************/
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	/*
	 * uint64_t for consistency with spmd_cpu_on_finish_handler and to
	 * match the %llu format specifier below (was int64_t).
	 */
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_OFF);

	if (ctx->secondary_ep.entry_point == 0UL) {
		/* SPMC was never entered on this CPU; nothing to notify. */
		goto exit;
	}

	/* Build an SPMD to SPMC direct message request. */
	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
	}

	/* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */

exit:
	ctx->state = SPMC_STATE_OFF;

	VERBOSE("CPU %u off!\n", linear_id);

	return 0;
}

/*******************************************************************************
 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
 * PSCI executes a power mgmt. operation.
 ******************************************************************************/
const spd_pm_ops_t spmd_pm = {
	.svc_on_finish = spmd_cpu_on_finish_handler,
	.svc_off = spmd_cpu_off_handler
};