/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <secure_partition.h>
#include <smcc.h>
#include <smcc_helpers.h>
#include <spinlock.h>
#include <spm_svc.h>
#include <utils.h>
#include <xlat_tables_v2.h>

#include "spm_private.h"

/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
static spinlock_t mem_attr_smc_lock;

/*******************************************************************************
 * Secure Partition context information.
 ******************************************************************************/
static secure_partition_context_t sp_ctx;
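/*
 * Non-zero while the initial (boot-time) synchronous entry into the Secure
 * Partition performed by spm_init() is in progress. The boot-time-only SMCs
 * below check this flag.
 */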
unsigned int sp_init_in_progress;

/*******************************************************************************
 * Replace the S-EL1 re-entry information with S-EL0 re-entry
 * information
 ******************************************************************************/
void spm_setup_next_eret_into_sel0(cpu_context_t *secure_context)
{
	assert(secure_context == cm_get_context(SECURE));

	cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
}

/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from sp_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee-saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the sp_ctx->cpu_ctx are used to enter the secure partition image.
 ******************************************************************************/
static uint64_t spm_synchronous_sp_entry(secure_partition_context_t *sp_ctx_ptr)
{
	uint64_t rc;

	assert(sp_ctx_ptr != NULL);
	assert(sp_ctx_ptr->c_rt_ctx == 0);
	assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);

	/* Apply the Secure EL1 system register context and switch to it */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	VERBOSE("%s: We're about to enter the Secure partition...\n", __func__);

	rc = spm_secure_partition_enter(&sp_ctx_ptr->c_rt_ctx);
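	/*
	 * spm_secure_partition_enter() returns here only once the SP has
	 * signalled completion via spm_synchronous_sp_exit(). Clear the saved
	 * C runtime context reference so that the assertion above can detect
	 * an unexpected nested entry.
	 */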
#if ENABLE_ASSERTIONS
	sp_ctx_ptr->c_rt_ctx = 0;
#endif

	return rc;
}


/*******************************************************************************
 * This function takes a Secure partition context pointer and:
 * 1. Saves the S-EL1 system register context to sp_ctx->cpu_ctx.
 * 2. Restores the current C runtime state (callee-saved registers) from the
 *    stack frame using the reference to this state saved in
 *    spm_secure_partition_enter().
 * 3. It does not need to save any general purpose or EL3 system register state
 *    because the generic SMC entry routine should have saved those.
 ******************************************************************************/
static void __dead2 spm_synchronous_sp_exit(
			secure_partition_context_t *sp_ctx_ptr, uint64_t ret)
{
	assert(sp_ctx_ptr != NULL);
	/* Save the Secure EL1 system register context */
	assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);
	cm_el1_sysregs_context_save(SECURE);

	assert(sp_ctx_ptr->c_rt_ctx != 0);
	spm_secure_partition_exit(sp_ctx_ptr->c_rt_ctx, ret);

	/* Should never reach here */
	assert(0);
}

/*******************************************************************************
 * This function passes control to the Secure Partition image (BL32) for the
 * first time on the primary CPU after a cold boot. It assumes that a valid
 * secure context has already been created by spm_setup(), which can be used
 * directly. This function performs a synchronous entry into the Secure
 * partition. The SP passes control back to this routine through an SMC.
 ******************************************************************************/
int32_t spm_init(void)
{
	entry_point_info_t *secure_partition_ep_info;
	uint64_t rc;

	VERBOSE("%s entry\n", __func__);

	/*
	 * Get information about the Secure Partition (BL32) image. Its
	 * absence is a critical failure.
	 */
	secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	assert(secure_partition_ep_info);

	/*
	 * Initialise the common context and then overlay the S-EL0 specific
	 * context on top of it.
	 */
	cm_init_my_context(secure_partition_ep_info);
	secure_partition_setup();

	/*
	 * Arrange for an entry into the secure partition.
	 */
	sp_init_in_progress = 1;
	rc = spm_synchronous_sp_entry(&sp_ctx);
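	/* The SP must report successful initialisation by returning 0. */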
	assert(rc == 0);
	sp_init_in_progress = 0;
	VERBOSE("SP_MEMORY_ATTRIBUTES_SET_AARCH64 availability has been revoked\n");

	return rc;
}

/*******************************************************************************
 * Given a secure partition entrypoint info pointer, an entry point PC and a
 * pointer to a context data structure, this function initialises the SPM
 * context and entry point info for the secure partition.
 ******************************************************************************/
void spm_init_sp_ep_state(struct entry_point_info *sp_ep_info,
			  uint64_t pc,
			  secure_partition_context_t *sp_ctx_ptr)
{
	uint32_t ep_attr;

	assert(sp_ep_info);
	assert(pc);
	assert(sp_ctx_ptr);

	cm_set_context(&sp_ctx_ptr->cpu_ctx, SECURE);

	/* initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(sp_ep_info, PARAM_EP, VERSION_1, ep_attr);

	sp_ep_info->pc = pc;
	/* The secure partition runs in S-EL0. */
	sp_ep_info->spsr = SPSR_64(MODE_EL0,
				   MODE_SP_EL0,
				   DISABLE_ALL_EXCEPTIONS);

	zeromem(&sp_ep_info->args, sizeof(sp_ep_info->args));
}

/*******************************************************************************
 * Secure Partition Manager setup. The SPM finds out the SP entrypoint if not
 * already known and initialises the context for entry into the SP for its
 * initialisation.
 ******************************************************************************/
int32_t spm_setup(void)
{
	entry_point_info_t *secure_partition_ep_info;

	VERBOSE("%s entry\n", __func__);

	/*
	 * Get information about the Secure Partition (BL32) image. Its
	 * absence is a critical failure.
	 */
	secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!secure_partition_ep_info) {
		WARN("No SPM provided by BL2 boot loader, Booting device"
			" without SPM initialization. SMCs destined for SPM"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, return a non-zero value
	 * to signal that the service failed to initialise. Bail out without
	 * registering any handlers.
	 */
	if (!secure_partition_ep_info->pc) {
		return 1;
	}

	spm_init_sp_ep_state(secure_partition_ep_info,
			      secure_partition_ep_info->pc,
			      &sp_ctx);

	/*
	 * All SPM initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&spm_init);

	VERBOSE("%s exit\n", __func__);

	return 0;
}

/*
 * Attributes are encoded using a different format in the SMC interface than in
 * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
 * converts an attributes value from the SMC format to the mmap_attr_t format by
 * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
 * The other fields are left as 0 because they are ignored by the function
 * change_mem_attributes().
 */
static mmap_attr_t smc_attr_to_mmap_attr(unsigned int attributes)
{
	mmap_attr_t tf_attr = 0;

	unsigned int access = (attributes & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
			      >> SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;

	if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
		tf_attr |= MT_RW | MT_USER;
	} else if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
		tf_attr |= MT_RO | MT_USER;
	} else {
		/* Other values are reserved. */
		assert(access == SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
		/* The only requirement is that there's no access from EL0. */
		tf_attr |= MT_RO | MT_PRIVILEGED;
	}

	if ((attributes & SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
		tf_attr |= MT_EXECUTE;
	} else {
		tf_attr |= MT_EXECUTE_NEVER;
	}

	return tf_attr;
}

/*
 * This function converts attributes from the Trusted Firmware format into the
 * SMC interface format.
 */
static int smc_mmap_to_smc_attr(mmap_attr_t attr)
{
	int smc_attr = 0;

	int data_access;

	if ((attr & MT_USER) == 0) {
		/* No access from EL0. */
		data_access = SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
	} else {
		if ((attr & MT_RW) != 0) {
			assert(MT_TYPE(attr) != MT_DEVICE);
			data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RW;
		} else {
			data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RO;
		}
	}

	smc_attr |= (data_access & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
		    << SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;

	if (attr & MT_EXECUTE_NEVER) {
		smc_attr |= SP_MEMORY_ATTRIBUTES_NON_EXEC;
	}

	return smc_attr;
}

static int spm_memory_attributes_get_smc_handler(uintptr_t base_va)
{
	spin_lock(&mem_attr_smc_lock);

	mmap_attr_t attributes;
	int rc = get_mem_attributes(secure_partition_xlat_ctx_handle,
				     base_va, &attributes);

	spin_unlock(&mem_attr_smc_lock);

	/* Convert error codes of get_mem_attributes() into SPM ones. */
	assert(rc == 0 || rc == -EINVAL);

	if (rc == 0) {
		return smc_mmap_to_smc_attr(attributes);
	} else {
		return SPM_INVALID_PARAMETER;
	}
}

static int spm_memory_attributes_set_smc_handler(u_register_t page_address,
					u_register_t pages_count,
					u_register_t smc_attributes)
{
	uintptr_t base_va = (uintptr_t) page_address;
	size_t size = (size_t) (pages_count * PAGE_SIZE);
	unsigned int attributes = (unsigned int) smc_attributes;

	INFO("  Start address  : 0x%lx\n", base_va);
	INFO("  Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
	INFO("  Attributes     : 0x%x\n", attributes);

	spin_lock(&mem_attr_smc_lock);

	int ret = change_mem_attributes(secure_partition_xlat_ctx_handle,
			base_va, size, smc_attr_to_mmap_attr(attributes));

	spin_unlock(&mem_attr_smc_lock);

	/* Convert error codes of change_mem_attributes() into SPM ones. */
	assert(ret == 0 || ret == -EINVAL);

	return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
}


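/*******************************************************************************
 * Top-level SMC handler of the Secure Partition Manager. Calls from the secure
 * world (SPM_VERSION, SP_EVENT_COMPLETE, SP_MEMORY_ATTRIBUTES_*) and from the
 * normal world (SP_VERSION, MM_COMMUNICATE) are dispatched separately; any
 * other function id is answered with SMC_UNK.
 ******************************************************************************/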
uint64_t spm_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	unsigned int ns;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	if (ns == SMC_FROM_SECURE) {

		/* Handle SMCs from Secure world. */

		switch (smc_fid) {

		case SPM_VERSION_AARCH32:
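			/* Return the SPM version compiled into this image. */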
			SMC_RET1(handle, SPM_VERSION_COMPILED);

		case SP_EVENT_COMPLETE_AARCH64:
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);
			spm_setup_next_eret_into_sel0(handle);

			if (sp_init_in_progress) {
				/*
				 * SPM reports completion. The SPM must have
				 * initiated the original request through a
				 * synchronous entry into the secure
				 * partition. Jump back to the original C
				 * runtime context.
				 */
				spm_synchronous_sp_exit(&sp_ctx, x1);
				assert(0);
			}

			/*
			 * This is the result from the Secure partition of an
			 * earlier request. Copy the result into the non-secure
			 * context, save the secure state and return to the
			 * non-secure state.
			 */

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			/* Return to normal world */
			SMC_RET1(ns_cpu_context, x1);

		case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
			INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");

			if (!sp_init_in_progress) {
				WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
				SMC_RET1(handle, SPM_NOT_SUPPORTED);
			}
			SMC_RET1(handle, spm_memory_attributes_get_smc_handler(x1));

		case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
			INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");

			if (!sp_init_in_progress) {
				WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
				SMC_RET1(handle, SPM_NOT_SUPPORTED);
			}
			SMC_RET1(handle, spm_memory_attributes_set_smc_handler(x1, x2, x3));
		default:
			break;
		}
	} else {

		/* Handle SMCs from Non-secure world. */

		switch (smc_fid) {

		case SP_VERSION_AARCH64:
		case SP_VERSION_AARCH32:
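			/* Return the SP interface version compiled into this image. */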
			SMC_RET1(handle, SP_VERSION_COMPILED);

		case MM_COMMUNICATE_AARCH32:
		case MM_COMMUNICATE_AARCH64:
		{
			uint64_t mm_cookie = x1;
			uint64_t comm_buffer_address = x2;
			uint64_t comm_size_address = x3;

			/* Cookie. Reserved for future use. It must be zero. */
			if (mm_cookie != 0) {
				ERROR("MM_COMMUNICATE: cookie is not zero\n");
				SMC_RET1(handle, SPM_INVALID_PARAMETER);
			}

			if (comm_buffer_address == 0) {
				ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
				SMC_RET1(handle, SPM_INVALID_PARAMETER);
			}

			if (comm_size_address != 0) {
				VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
			}

			/* Save the Normal world context */
			cm_el1_sysregs_context_save(NON_SECURE);

			/*
			 * Restore the secure world context and prepare for
			 * entry in S-EL0
			 */
			assert(&sp_ctx.cpu_ctx == cm_get_context(SECURE));
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

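			/*
			 * Enter the Secure Partition with the function id, the
			 * communication buffer address, the buffer size address
			 * and the linear core id in x0-x3.
			 */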
			SMC_RET4(&sp_ctx.cpu_ctx, smc_fid, comm_buffer_address,
				 comm_size_address, plat_my_core_pos());
		}

		case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
		case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
			/* SMC interfaces reserved for secure callers. */
			SMC_RET1(handle, SPM_NOT_SUPPORTED);

		default:
			break;
		}
	}

	SMC_RET1(handle, SMC_UNK);
}