/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>

#include "sm_err.h"
#include "smcall.h"

/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
#define HYP_ENABLE_FLAG		0x286001

26
27
struct trusty_stack {
	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
28
	uint32_t end;
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
};

struct trusty_cpu_ctx {
	cpu_context_t	cpu_ctx;
	void		*saved_sp;
	uint32_t	saved_security_state;
	int		fiq_handler_active;
	uint64_t	fiq_handler_pc;
	uint64_t	fiq_handler_cpsr;
	uint64_t	fiq_handler_sp;
	uint64_t	fiq_pc;
	uint64_t	fiq_cpsr;
	uint64_t	fiq_sp_el1;
	gp_regs_t	fiq_gpregs;
	struct trusty_stack	secure_stack;
};

struct args {
	uint64_t	r0;
	uint64_t	r1;
	uint64_t	r2;
	uint64_t	r3;
Anthony Zhou's avatar
Anthony Zhou committed
51
52
53
54
	uint64_t	r4;
	uint64_t	r5;
	uint64_t	r6;
	uint64_t	r7;
55
56
};

57
static struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
58
59

struct args trusty_init_context_stack(void **sp, void *new_stack);
Anthony Zhou's avatar
Anthony Zhou committed
60
struct args trusty_context_switch_helper(void **sp, void *smc_params);
61

62
63
static uint32_t current_vmid;

64
65
66
67
68
static struct trusty_cpu_ctx *get_trusty_ctx(void)
{
	return &trusty_cpu_ctx[plat_my_core_pos()];
}

Anthony Zhou's avatar
Anthony Zhou committed
69
70
71
72
73
74
75
static uint32_t is_hypervisor_mode(void)
{
	uint64_t hcr = read_hcr();

	return !!(hcr & HYP_ENABLE_FLAG);
}

76
77
78
79
80
static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
					 uint64_t r1, uint64_t r2, uint64_t r3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
Anthony Zhou's avatar
Anthony Zhou committed
81
	struct trusty_cpu_ctx *ctx_smc;
82
83
84

	assert(ctx->saved_security_state != security_state);

Anthony Zhou's avatar
Anthony Zhou committed
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
	ret.r7 = 0;
	if (is_hypervisor_mode()) {
		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
		ctx_smc = cm_get_context(NON_SECURE);
		assert(ctx_smc);
		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
	}
	/* r4, r5, r6 reserved for future use. */
	ret.r6 = 0;
	ret.r5 = 0;
	ret.r4 = 0;
	ret.r3 = r3;
	ret.r2 = r2;
	ret.r1 = r1;
	ret.r0 = r0;

101
102
103
104
105
106
107
108
	/*
	 * To avoid the additional overhead in PSCI flow, skip FP context
	 * saving/restoring in case of CPU suspend and resume, asssuming that
	 * when it's needed the PSCI caller has preserved FP context before
	 * going here.
	 */
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
		fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
109
110
111
	cm_el1_sysregs_context_save(security_state);

	ctx->saved_security_state = security_state;
Anthony Zhou's avatar
Anthony Zhou committed
112
	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);
113
114
115
116

	assert(ctx->saved_security_state == !security_state);

	cm_el1_sysregs_context_restore(security_state);
117
118
119
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
		fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));

120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
	cm_set_next_eret_context(security_state);

	return ret;
}

static uint64_t trusty_fiq_handler(uint32_t id,
				   uint32_t flags,
				   void *handle,
				   void *cookie)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	assert(!is_caller_secure(flags));

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
	if (ret.r0) {
		SMC_RET0(handle);
	}

	if (ctx->fiq_handler_active) {
		INFO("%s: fiq handler already active\n", __func__);
		SMC_RET0(handle);
	}

	ctx->fiq_handler_active = 1;
	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);

	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);

	SMC_RET0(handle);
}

static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
			uint64_t handler, uint64_t stack)
{
	struct trusty_cpu_ctx *ctx;

	if (cpu >= PLATFORM_CORE_COUNT) {
163
		ERROR("%s: cpu %lld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
		return SM_ERR_INVALID_PARAMETERS;
	}

	ctx = &trusty_cpu_ctx[cpu];
	ctx->fiq_handler_pc = handler;
	ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_handler_sp = stack;

	SMC_RET1(handle, 0);
}

static uint64_t trusty_get_fiq_regs(void *handle)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);

	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
}

static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->fiq_handler_active) {
		NOTICE("%s: fiq handler not active\n", __func__);
		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
	}

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
	if (ret.r0 != 1) {
195
		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %lld\n",
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
		       __func__, handle, ret.r0);
	}

	/*
	 * Restore register state to state recorded on fiq entry.
	 *
	 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
	 * restore them.
	 *
	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
	 * corrupts them (el1 code also restored them).
	 */
	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
	ctx->fiq_handler_active = 0;
	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);

	SMC_RET0(handle);
}

216
217
218
219
220
static uintptr_t trusty_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
221
222
			 void *cookie,
			 void *handle,
223
			 u_register_t flags)
224
225
{
	struct args ret;
226
	uint32_t vmid = 0;
227
228
229
230
231
232
233
	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);

	/*
	 * Return success for SET_ROT_PARAMS if Trusty is not present, as
	 * Verified Boot is not even supported and returning success here
	 * would not compromise the boot process.
	 */
234
	if (!ep_info && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
235
236
237
238
		SMC_RET1(handle, 0);
	} else if (!ep_info) {
		SMC_RET1(handle, SMC_UNK);
	}
239
240

	if (is_caller_secure(flags)) {
241
		if (smc_fid == SMC_YC_NS_RETURN) {
242
			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
Anthony Zhou's avatar
Anthony Zhou committed
243
244
			SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
				 ret.r4, ret.r5, ret.r6, ret.r7);
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
		}
		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
		     cpu %d, unknown smc\n",
		     __func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
		     plat_my_core_pos());
		SMC_RET1(handle, SMC_UNK);
	} else {
		switch (smc_fid) {
		case SMC_FC64_SET_FIQ_HANDLER:
			return trusty_set_fiq_handler(handle, x1, x2, x3);
		case SMC_FC64_GET_FIQ_REGS:
			return trusty_get_fiq_regs(handle);
		case SMC_FC_FIQ_EXIT:
			return trusty_fiq_exit(handle, x1, x2, x3);
		default:
260
261
262
263
264
265
266
267
268
269
270
271
			if (is_hypervisor_mode())
				vmid = SMC_GET_GP(handle, CTX_GPREG_X7);

			if ((current_vmid != 0) && (current_vmid != vmid)) {
				/* This message will cause SMC mechanism
				 * abnormal in multi-guest environment.
				 * Change it to WARN in case you need it.
				 */
				VERBOSE("Previous SMC not finished.\n");
				SMC_RET1(handle, SM_ERR_BUSY);
			}
			current_vmid = vmid;
272
273
			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
				x2, x3);
274
			current_vmid = 0;
275
276
277
278
279
280
281
			SMC_RET1(handle, ret.r0);
		}
	}
}

static int32_t trusty_init(void)
{
282
	void el3_exit(void);
283
	entry_point_info_t *ep_info;
Anthony Zhou's avatar
Anthony Zhou committed
284
	struct args zero_args = {0};
285
286
287
288
289
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint32_t cpu = plat_my_core_pos();
	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
			       CTX_SPSR_EL3));

290
291
292
293
	/*
	 * Get information about the Trusty image. Its absence is a critical
	 * failure.
	 */
294
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
295
	assert(ep_info);
296

297
	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
	cm_el1_sysregs_context_save(NON_SECURE);

	cm_set_context(&ctx->cpu_ctx, SECURE);
	cm_init_my_context(ep_info);

	/*
	 * Adjust secondary cpu entry point for 32 bit images to the
	 * end of exeption vectors
	 */
	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
		     cpu, ep_info->pc + (1U << 5));
		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
	}

	cm_el1_sysregs_context_restore(SECURE);
314
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
315
316
317
	cm_set_next_eret_context(SECURE);

	ctx->saved_security_state = ~0; /* initial saved state is invalid */
318
	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
319

Anthony Zhou's avatar
Anthony Zhou committed
320
	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
321
322

	cm_el1_sysregs_context_restore(NON_SECURE);
323
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
324
325
	cm_set_next_eret_context(NON_SECURE);

326
	return 1;
327
328
}

329
static void trusty_cpu_suspend(uint32_t off)
330
331
332
{
	struct args ret;

333
	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
334
	if (ret.r0 != 0) {
335
		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %lld\n",
336
		     __func__, plat_my_core_pos(), ret.r0);
337
338
339
	}
}

340
static void trusty_cpu_resume(uint32_t on)
341
342
343
{
	struct args ret;

344
	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
345
	if (ret.r0 != 0) {
346
		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %lld\n",
347
		     __func__, plat_my_core_pos(), ret.r0);
348
349
350
	}
}

351
static int32_t trusty_cpu_off_handler(u_register_t unused)
352
{
353
	trusty_cpu_suspend(1);
354
355
356
357

	return 0;
}

358
static void trusty_cpu_on_finish_handler(u_register_t unused)
359
360
361
362
363
364
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->saved_sp) {
		trusty_init();
	} else {
365
		trusty_cpu_resume(1);
366
367
368
	}
}

369
static void trusty_cpu_suspend_handler(u_register_t unused)
370
{
371
	trusty_cpu_suspend(0);
372
373
}

374
static void trusty_cpu_suspend_finish_handler(u_register_t unused)
375
{
376
	trusty_cpu_resume(0);
377
378
379
380
381
382
383
384
385
}

static const spd_pm_ops_t trusty_pm = {
	.svc_off = trusty_cpu_off_handler,
	.svc_suspend = trusty_cpu_suspend_handler,
	.svc_on_finish = trusty_cpu_on_finish_handler,
	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
};

386
387
388
389
390
391
392
393
394
395
void plat_trusty_set_boot_args(aapcs64_params_t *args);

#ifdef TSP_SEC_MEM_SIZE
#pragma weak plat_trusty_set_boot_args
void plat_trusty_set_boot_args(aapcs64_params_t *args)
{
	args->arg0 = TSP_SEC_MEM_SIZE;
}
#endif

396
397
398
static int32_t trusty_setup(void)
{
	entry_point_info_t *ep_info;
399
	uint32_t instr;
400
401
	uint32_t flags;
	int ret;
402
	bool aarch32 = false;
403

404
	/* Get trusty's entry point info */
405
406
407
408
409
410
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!ep_info) {
		INFO("Trusty image missing.\n");
		return -1;
	}

411
	instr = *(uint32_t *)ep_info->pc;
412

413
	if (instr >> 24 == 0xeaU) {
414
		INFO("trusty: Found 32 bit image\n");
415
		aarch32 = true;
416
	} else if (instr >> 8 == 0xd53810U || instr >> 16 == 0x9400U) {
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
		INFO("trusty: Found 64 bit image\n");
	} else {
		NOTICE("trusty: Found unknown image, 0x%x\n", instr);
	}

	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
	if (!aarch32)
		ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);
	else
		ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
					    SPSR_E_LITTLE,
					    DAIF_FIQ_BIT |
					    DAIF_IRQ_BIT |
					    DAIF_ABT_BIT);
432
	(void)memset(&ep_info->args, 0, sizeof(ep_info->args));
433
	plat_trusty_set_boot_args(&ep_info->args);
434

435
	/* register init handler */
436
437
	bl31_register_bl32_init(trusty_init);

438
	/* register power management hooks */
439
440
	psci_register_spd_pm_hook(&trusty_pm);

441
	/* register interrupt handler */
442
443
444
445
446
447
448
449
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      trusty_fiq_handler,
					      flags);
	if (ret)
		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);

450
451
452
453
454
	if (aarch32) {
		entry_point_info_t *ns_ep_info;
		uint32_t spsr;

		ns_ep_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
455
		if (ns_ep_info == NULL) {
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
			NOTICE("Trusty: non-secure image missing.\n");
			return -1;
		}
		spsr = ns_ep_info->spsr;
		if (GET_RW(spsr) == MODE_RW_64 && GET_EL(spsr) == MODE_EL2) {
			spsr &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
			spsr |= MODE_EL1 << MODE_EL_SHIFT;
		}
		if (GET_RW(spsr) == MODE_RW_32 && GET_M32(spsr) == MODE32_hyp) {
			spsr &= ~(MODE32_MASK << MODE32_SHIFT);
			spsr |= MODE32_svc << MODE32_SHIFT;
		}
		if (spsr != ns_ep_info->spsr) {
			NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
			       ns_ep_info->spsr, spsr);
			ns_ep_info->spsr = spsr;
		}
	}

475
476
477
478
479
480
481
482
483
484
485
486
487
488
	return 0;
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	trusty_fast,

	OEN_TOS_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_FAST,
	trusty_setup,
	trusty_smc_handler
);

489
/* Define a SPD runtime service descriptor for yielding SMC calls */
490
491
492
DECLARE_RT_SVC(
	trusty_std,

493
	OEN_TAP_START,
494
	SMC_ENTITY_SECURE_MONITOR,
495
	SMC_TYPE_YIELD,
496
497
498
	NULL,
	trusty_smc_handler
);