/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <lib/el3_runtime/context_mgmt.h>
#include "spmd_private.h"

struct spmd_pm_secondary_ep_t {
	uintptr_t entry_point;
	uintptr_t context;
	bool locked;
};

static struct spmd_pm_secondary_ep_t spmd_pm_secondary_ep[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * spmd_build_spmc_message
 *
 * Builds an SPMD to SPMC direct message request.
 ******************************************************************************/
static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
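
/*
 * Illustrative sketch (derived from the helper above, not a separate
 * implementation): GP register layout of the direct request as presented to
 * the SPMC, shown here for the CPU_OFF framework message sent later in this
 * file. Actual ID values depend on the platform/build configuration.
 *
 *   x0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
 *   x1 = (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
 *        spmd_spmc_id_get()                  - source and destination IDs
 *   x2 = FFA_PARAM_MBZ
 *   x3 = PSCI_CPU_OFF                        - the message payload
 */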

/*******************************************************************************
 * spmd_pm_secondary_core_set_ep
 *
 * Registers the entry point and context handle to be used when the secondary
 * core identified by mpidr is brought up through PSCI CPU_ON. A successful
 * registration locks the entry so it cannot be overridden later.
 ******************************************************************************/
int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
		uintptr_t entry_point, unsigned long long context)
{
	int id = plat_core_pos_by_mpidr(mpidr);

	if ((id < 0) || (id >= PLATFORM_CORE_COUNT)) {
		ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
		return -EINVAL;
	}

	if (spmd_pm_secondary_ep[id].locked) {
		ERROR("%s entry locked (%llx)\n", __func__, mpidr);
		return -EINVAL;
	}

	/*
	 * Check entry_point address is a PA within
	 * load_address <= entry_point < load_address + binary_size
	 */
	if (!spmd_check_address_in_binary_image(entry_point)) {
		ERROR("%s entry point is not within image boundaries (%llx)\n",
		      __func__, mpidr);
		return -EINVAL;
	}

	/* Fill new entry to corresponding secondary core id and lock it */
	spmd_pm_secondary_ep[id].entry_point = entry_point;
	spmd_pm_secondary_ep[id].context = context;
	spmd_pm_secondary_ep[id].locked = true;

	VERBOSE("%s %d %llx %lx %llx\n",
		__func__, id, mpidr, entry_point, context);

	return 0;
}
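
/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * the registration above is expected to be driven from the SPMD's SMC
 * handling path. The argument names used below (mpidr, sec_entry_point,
 * cookie) are placeholders for this example.
 *
 *   rc = spmd_pm_secondary_core_set_ep(mpidr, sec_entry_point, cookie);
 *   if (rc != 0) {
 *           (report an error, e.g. FFA_ERROR_INVALID_PARAMETER, back to
 *            the caller)
 *   }
 */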

/*******************************************************************************
 * This CPU has been turned on. Enter SPMC to initialise S-EL1 or S-EL2. As part
 * of its initialization path, the SPMC will initialize any SPs that it
 * manages. Entry into the SPMC is done after initialising minimal
 * architectural state that guarantees safe execution.
 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
	entry_point_info_t *spmc_ep_info = spmd_spmc_ep_info_get();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	int rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_ON);
	assert(spmc_ep_info != NULL);

	/*
	 * TODO: this might require locking the spmc_ep_info structure,
	 * or provisioning one structure per cpu
	 */
	if (spmd_pm_secondary_ep[linear_id].entry_point == 0) {
		goto exit;
	}

	spmc_ep_info->pc = spmd_pm_secondary_ep[linear_id].entry_point;
	cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);
	write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
		      spmd_pm_secondary_ep[linear_id].context);

	/* Mark CPU as initiating ON operation */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0) {
		ERROR("%s failed failed (%d) on CPU%u\n", __func__, rc,
			linear_id);
		ctx->state = SPMC_STATE_OFF;
		return;
	}

exit:
	ctx->state = SPMC_STATE_ON;

	VERBOSE("CPU %u on!\n", linear_id);
}
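
/*
 * Summary of the state handling above (derived from the code, for
 * readability): the per-core context enters this handler in a state other
 * than SPMC_STATE_ON, moves to SPMC_STATE_ON_PENDING while the SPMC is
 * entered synchronously, then to SPMC_STATE_ON on success (or directly to
 * SPMC_STATE_ON when no secondary entry point was registered), and to
 * SPMC_STATE_OFF if the synchronous entry fails.
 */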

/*******************************************************************************
 * spmd_cpu_off_handler
 *
 * This CPU is being turned off. If a secondary entry point was registered for
 * it, notify the SPMC through a direct message request carrying PSCI_CPU_OFF
 * before marking the per-core context as off.
 ******************************************************************************/
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	int32_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_OFF);

	if (spmd_pm_secondary_ep[linear_id].entry_point == 0) {
		goto exit;
	}

	/* Build an SPMD to SPMC direct message request. */
	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0) {
		ERROR("%s failed (%d) on CPU%u\n", __func__, rc, linear_id);
	}

	/* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */

exit:
	ctx->state = SPMC_STATE_OFF;

	VERBOSE("CPU %u off!\n", linear_id);

	return 0;
}
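
/*
 * Minimal sketch for the TODO in the handler above (an assumption, not an
 * implementation): once the synchronous entry returns, the SPMC is expected
 * to have answered the framework message with a direct message response,
 * which could be checked along these lines. The constant name
 * FFA_MSG_SEND_DIRECT_RESP_SMC32 is assumed for the purpose of this sketch.
 *
 *   if (read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0) !=
 *       FFA_MSG_SEND_DIRECT_RESP_SMC32) {
 *           ERROR("%s unexpected SPMC response on CPU%u\n",
 *                 __func__, linear_id);
 *   }
 */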

/*******************************************************************************
 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
 * PSCI executes a power mgmt. operation.
 ******************************************************************************/
const spd_pm_ops_t spmd_pm = {
	.svc_on_finish = spmd_cpu_on_finish_handler,
	.svc_off = spmd_cpu_off_handler
};
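
/*
 * Usage note (a sketch based on the generic SPD/PSCI hook mechanism): these
 * handlers only take effect once the dispatcher registers them with the
 * PSCI framework, typically from its setup path, e.g.:
 *
 *   psci_register_spd_pm_hook(&spmd_pm);
 */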