/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h> /* for context_mgmt.h */
#include <bl31.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <string.h>

#include "sm_err.h"
#include "smcall.h"

/* Flag used to check if the hypervisor is enabled in the HCR_EL2 register */
#define HYP_ENABLE_FLAG		0x286001

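/*
 * Per-CPU secure stack for Trusty. The address of 'end' (the top of the
 * stack) is handed to trusty_init_context_stack() below.
 */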
struct trusty_stack {
	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
	uint32_t end;
};

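/*
 * Per-CPU state kept by this dispatcher: the Trusty cpu_context, the secure
 * stack pointer saved across world switches, and the register state recorded
 * on FIQ entry so it can be restored on SMC_FC_FIQ_EXIT.
 */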
struct trusty_cpu_ctx {
	cpu_context_t	cpu_ctx;
	void		*saved_sp;
	uint32_t	saved_security_state;
	int		fiq_handler_active;
	uint64_t	fiq_handler_pc;
	uint64_t	fiq_handler_cpsr;
	uint64_t	fiq_handler_sp;
	uint64_t	fiq_pc;
	uint64_t	fiq_cpsr;
	uint64_t	fiq_sp_el1;
	gp_regs_t	fiq_gpregs;
	struct trusty_stack	secure_stack;
};

struct args {
	uint64_t	r0;
	uint64_t	r1;
	uint64_t	r2;
	uint64_t	r3;
	uint64_t	r4;
	uint64_t	r5;
	uint64_t	r6;
	uint64_t	r7;
};

struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];

struct args trusty_init_context_stack(void **sp, void *new_stack);
struct args trusty_context_switch_helper(void **sp, void *smc_params);

static uint32_t current_vmid;

static struct trusty_cpu_ctx *get_trusty_ctx(void)
{
	return &trusty_cpu_ctx[plat_my_core_pos()];
}

static uint32_t is_hypervisor_mode(void)
{
	uint64_t hcr = read_hcr();

	return !!(hcr & HYP_ENABLE_FLAG);
}

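/*
 * Switch to the other world: save the EL1 system registers (and, except for
 * CPU suspend/resume, the FP/SIMD registers) of the current security state,
 * enter the other world through trusty_context_switch_helper() and restore
 * the saved context once control returns.
 */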
static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
					 uint64_t r1, uint64_t r2, uint64_t r3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	struct trusty_cpu_ctx *ctx_smc;

	assert(ctx->saved_security_state != security_state);

	ret.r7 = 0;
	if (is_hypervisor_mode()) {
		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
		ctx_smc = cm_get_context(NON_SECURE);
		assert(ctx_smc);
		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
	}
	/* r4, r5, r6 reserved for future use. */
	ret.r6 = 0;
	ret.r5 = 0;
	ret.r4 = 0;
	ret.r3 = r3;
	ret.r2 = r2;
	ret.r1 = r1;
	ret.r0 = r0;

	/*
	 * To avoid the additional overhead in PSCI flow, skip FP context
	 * saving/restoring in case of CPU suspend and resume, assuming that
	 * when it's needed the PSCI caller has preserved FP context before
	 * going here.
	 */
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
		fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
	cm_el1_sysregs_context_save(security_state);

	ctx->saved_security_state = security_state;
	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);

	assert(ctx->saved_security_state == !security_state);

	cm_el1_sysregs_context_restore(security_state);
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
		fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));

	cm_set_next_eret_context(security_state);

	return ret;
}

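/*
 * S-EL1 interrupt handler, entered while executing in the non-secure world.
 * The FIQ is forwarded to Trusty with SMC_FC_FIQ_ENTER; on return, the
 * interrupted non-secure register state is recorded and the non-secure
 * return address, CPSR and SP_EL1 are redirected to the registered EL1 FIQ
 * handler.
 */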
static uint64_t trusty_fiq_handler(uint32_t id,
				   uint32_t flags,
				   void *handle,
				   void *cookie)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	assert(!is_caller_secure(flags));

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
	if (ret.r0) {
		SMC_RET0(handle);
	}

	if (ctx->fiq_handler_active) {
		INFO("%s: fiq handler already active\n", __func__);
		SMC_RET0(handle);
	}

	ctx->fiq_handler_active = 1;
	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);

	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);

	SMC_RET0(handle);
}

static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
			uint64_t handler, uint64_t stack)
{
	struct trusty_cpu_ctx *ctx;

	if (cpu >= PLATFORM_CORE_COUNT) {
		ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
		return SM_ERR_INVALID_PARAMETERS;
	}

	ctx = &trusty_cpu_ctx[cpu];
	ctx->fiq_handler_pc = handler;
	ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_handler_sp = stack;

	SMC_RET1(handle, 0);
}

static uint64_t trusty_get_fiq_regs(void *handle)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);

	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
}

static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->fiq_handler_active) {
		NOTICE("%s: fiq handler not active\n", __func__);
		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
	}

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
	if (ret.r0 != 1) {
		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
		       __func__, handle, ret.r0);
	}

	/*
	 * Restore register state to state recorded on fiq entry.
	 *
	 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
	 * restore them.
	 *
	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
	 * corrupts them (el1 code also restored them).
	 */
	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
	ctx->fiq_handler_active = 0;
	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);

	SMC_RET0(handle);
}

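/*
 * Top-level SMC dispatch. Secure callers only issue SMC_YC_NS_RETURN to hand
 * control back to the non-secure world; non-secure callers either hit the
 * FIQ helper FIDs handled here or are forwarded to Trusty through a world
 * switch (serialized per VMID when running under a hypervisor).
 */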
static uint64_t trusty_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	struct args ret;
	uint32_t vmid = 0;
	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);

	/*
	 * Return success for SET_ROT_PARAMS if Trusty is not present, as
	 * Verified Boot is not even supported and returning success here
	 * would not compromise the boot process.
	 */
	if (!ep_info && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
		SMC_RET1(handle, 0);
	} else if (!ep_info) {
		SMC_RET1(handle, SMC_UNK);
	}

	if (is_caller_secure(flags)) {
		if (smc_fid == SMC_YC_NS_RETURN) {
			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
			SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
				 ret.r4, ret.r5, ret.r6, ret.r7);
		}
		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
		     cpu %d, unknown smc\n",
		     __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
		     plat_my_core_pos());
		SMC_RET1(handle, SMC_UNK);
	} else {
		switch (smc_fid) {
		case SMC_FC64_SET_FIQ_HANDLER:
			return trusty_set_fiq_handler(handle, x1, x2, x3);
		case SMC_FC64_GET_FIQ_REGS:
			return trusty_get_fiq_regs(handle);
		case SMC_FC_FIQ_EXIT:
			return trusty_fiq_exit(handle, x1, x2, x3);
		default:
			if (is_hypervisor_mode())
				vmid = SMC_GET_GP(handle, CTX_GPREG_X7);

			if ((current_vmid != 0) && (current_vmid != vmid)) {
				/*
				 * Keep this message at VERBOSE: a higher log
				 * level would disturb the SMC mechanism in a
				 * multi-guest environment. Change it to WARN
				 * only if you need it for debugging.
				 */
				VERBOSE("Previous SMC not finished.\n");
				SMC_RET1(handle, SM_ERR_BUSY);
			}
			current_vmid = vmid;
			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
				x2, x3);
			current_vmid = 0;
			SMC_RET1(handle, ret.r0);
		}
	}
}

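/*
 * First entry into Trusty on this CPU: save the non-secure context, build
 * the secure context from the image entry point information, run Trusty's
 * initialization on the dedicated secure stack, and restore the non-secure
 * context once Trusty yields back.
 */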
static int32_t trusty_init(void)
{
	void el3_exit(void);
	entry_point_info_t *ep_info;
	struct args zero_args = {0};
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint32_t cpu = plat_my_core_pos();
	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
			       CTX_SPSR_EL3));

	/*
	 * Get information about the Trusty image. Its absence is a critical
	 * failure.
	 */
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	assert(ep_info);

	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
	cm_el1_sysregs_context_save(NON_SECURE);

	cm_set_context(&ctx->cpu_ctx, SECURE);
	cm_init_my_context(ep_info);

	/*
	 * Adjust secondary cpu entry point for 32 bit images to the
	 * end of exception vectors
	 */
	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
		     cpu, ep_info->pc + (1U << 5));
		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
	}

	cm_el1_sysregs_context_restore(SECURE);
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
	cm_set_next_eret_context(SECURE);

	ctx->saved_security_state = ~0; /* initial saved state is invalid */
	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);

	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);

	cm_el1_sysregs_context_restore(NON_SECURE);
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
	cm_set_next_eret_context(NON_SECURE);

	return 0;
}

static void trusty_cpu_suspend(uint32_t off)
{
	struct args ret;

	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
	if (ret.r0 != 0) {
		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
		     __func__, plat_my_core_pos(), ret.r0);
	}
}

static void trusty_cpu_resume(uint32_t on)
{
	struct args ret;

	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
	if (ret.r0 != 0) {
		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
		     __func__, plat_my_core_pos(), ret.r0);
	}
}

static int32_t trusty_cpu_off_handler(uint64_t unused)
{
	trusty_cpu_suspend(1);

	return 0;
}

static void trusty_cpu_on_finish_handler(uint64_t unused)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->saved_sp) {
		trusty_init();
	} else {
		trusty_cpu_resume(1);
	}
}

static void trusty_cpu_suspend_handler(uint64_t unused)
{
	trusty_cpu_suspend(0);
}

static void trusty_cpu_suspend_finish_handler(uint64_t unused)
{
	trusty_cpu_resume(0);
}

static const spd_pm_ops_t trusty_pm = {
	.svc_off = trusty_cpu_off_handler,
	.svc_suspend = trusty_cpu_suspend_handler,
	.svc_on_finish = trusty_cpu_on_finish_handler,
	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
};

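/*
 * Hook for the platform to pass boot parameters to Trusty in the general
 * purpose registers. The weak default below only reports the secure memory
 * size in arg0, and only when TSP_SEC_MEM_SIZE is defined.
 */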
void plat_trusty_set_boot_args(aapcs64_params_t *args);

#ifdef TSP_SEC_MEM_SIZE
#pragma weak plat_trusty_set_boot_args
void plat_trusty_set_boot_args(aapcs64_params_t *args)
{
	args->arg0 = TSP_SEC_MEM_SIZE;
}
#endif

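/*
 * Called at BL31 setup: locate the Trusty image, determine whether it is a
 * 32-bit or 64-bit image, program its entry SPSR and boot arguments, and
 * register the init, power management and FIQ handler hooks.
 */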
static int32_t trusty_setup(void)
{
	entry_point_info_t *ep_info;
	uint32_t instr;
	uint32_t flags;
	int ret;
	int aarch32 = 0;

	/* Get trusty's entry point info */
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!ep_info) {
		INFO("Trusty image missing.\n");
		return -1;
	}

	instr = *(uint32_t *)ep_info->pc;

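	/*
	 * Guess the image type from its first instruction: an A32
	 * unconditional branch (0xEA......) indicates a 32-bit image, while
	 * an A64 MRS (0xD53810..) or BL (0x9400....) encoding indicates a
	 * 64-bit image.
	 */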
	if (instr >> 24 == 0xea) {
		INFO("trusty: Found 32 bit image\n");
		aarch32 = 1;
	} else if (instr >> 8 == 0xd53810 || instr >> 16 == 0x9400) {
		INFO("trusty: Found 64 bit image\n");
	} else {
		NOTICE("trusty: Found unknown image, 0x%x\n", instr);
	}

	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
	if (!aarch32)
		ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);
	else
		ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
					    SPSR_E_LITTLE,
					    DAIF_FIQ_BIT |
					    DAIF_IRQ_BIT |
					    DAIF_ABT_BIT);
	memset(&ep_info->args, 0, sizeof(ep_info->args));
	plat_trusty_set_boot_args(&ep_info->args);

	/* register init handler */
	bl31_register_bl32_init(trusty_init);

	/* register power management hooks */
	psci_register_spd_pm_hook(&trusty_pm);

	/* register interrupt handler */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      trusty_fiq_handler,
					      flags);
	if (ret)
		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);

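	/*
	 * When a 32-bit Trusty image is used, demote the non-secure (BL33)
	 * entry from EL2/hyp to EL1/svc.
	 */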
	if (aarch32) {
		entry_point_info_t *ns_ep_info;
		uint32_t spsr;

		ns_ep_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		if (!ns_ep_info) {
			NOTICE("Trusty: non-secure image missing.\n");
			return -1;
		}
		spsr = ns_ep_info->spsr;
		if (GET_RW(spsr) == MODE_RW_64 && GET_EL(spsr) == MODE_EL2) {
			spsr &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
			spsr |= MODE_EL1 << MODE_EL_SHIFT;
		}
		if (GET_RW(spsr) == MODE_RW_32 && GET_M32(spsr) == MODE32_hyp) {
			spsr &= ~(MODE32_MASK << MODE32_SHIFT);
			spsr |= MODE32_svc << MODE32_SHIFT;
		}
		if (spsr != ns_ep_info->spsr) {
			NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
			       ns_ep_info->spsr, spsr);
			ns_ep_info->spsr = spsr;
		}
	}

	return 0;
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	trusty_fast,

	OEN_TOS_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_FAST,
	trusty_setup,
	trusty_smc_handler
);

/* Define a SPD runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	trusty_std,

	OEN_TAP_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_YIELD,
	NULL,
	trusty_smc_handler
);