/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <arm_sip_svc.h>
#include <context.h>
#include <context_mgmt.h>
#include <plat_arm.h>
#include <psci.h>
#include <smccc_helpers.h>
#include <stdbool.h>
#include <string.h>
#include <utils.h>

/*
 * Handle SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; its width depends
 *	on the caller's register width.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, to be passed back to
 *	it upon re-entry.
 * handle:
 *	Handle to saved context.
 */
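/*
 * Illustrative only (a hypothetical sketch, not part of the original file):
 * an AArch64 caller could request a switch to AArch32 along these lines.
 * 'entry32' and 'cookie' are assumed names; the register usage follows the
 * parameter layout documented above:
 *
 *	register uint64_t x0 __asm__("x0") = ARM_SIP_SVC_STATE_SWITCH_64;
 *	register uint64_t x1 __asm__("x1") = 0;	// pc_hi: must be 0 here
 *	register uint64_t x2 __asm__("x2") = (uint32_t)(uintptr_t)entry32;
 *	register uint64_t x3 __asm__("x3") = 0;	// cookie_hi: must be 0 here
 *	register uint64_t x4 __asm__("x4") = (uint32_t)(uintptr_t)cookie;
 *	__asm__ volatile("smc #0"
 *		: "+r"(x0)
 *		: "r"(x1), "r"(x2), "r"(x3), "r"(x4));
 *
 * On success the SMC does not return; execution resumes at 'entry32' in
 * AArch32 with the cookie pair in registers 0 and 1.
 */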
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	/* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64
	bool caller_64, thumb = false, from_el2;
	unsigned int el, endianness;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* The caller has already validated that this SMC originated from NS */

	/*
	 * Disallow state switch if any of the secondaries have been brought up.
	 */
	if (psci_secondaries_brought_up() != 0)
		goto exec_denied;

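	/*
	 * The saved SPSR_EL3 records the state the caller was executing in;
	 * its RW field tells us whether that was AArch64 or AArch32.
	 */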
	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers when
		 * switching to AArch32.
		 */
		if ((pc_hi != 0U) || (cookie_hi != 0U))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32 */
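		/*
		 * Bit 0 of the entry point selects the T32 (Thumb) instruction
		 * set, mirroring BX-style interworking; A32 entry additionally
		 * requires 4-byte alignment, which is checked below.
		 */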
		thumb = (pc & 1U) != 0U;
	} else {
		/* Construct AArch64 PC */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}

	/* Make sure PC is 4-byte aligned, except for Thumb */
	if (((pc & 0x3U) != 0U) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls register width of the immediate lower EL only. Expect
	 * this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented, but was disabled. This can be inferred from
	 *   SCR_EL3.HCE.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
		(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
	if (!from_el2) {
		/* The call is from an NS privilege level other than HYP */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if ((scr & SCR_HCE_BIT) != 0U)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract
	 * endianness bit from the respective system control register
	 * directly.
	 */
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;

	/* Construct SPSR for the exception state we're about to switch to */
	if (caller_64) {
		unsigned long long impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU implements
		 * the target EL in AArch32.
		 */
		impl = from_el2 ? el_implemented(2) : el_implemented(1);
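		/*
		 * el_implemented() reports the implemented register widths of
		 * the given EL; EL_IMPL_A64_A32 means both AArch64 and AArch32
		 * are supported there.
		 */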
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32((u_register_t) el,
				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since an EL can't be
		 * implemented as AArch32-only, the EL this call was raised
		 * from is guaranteed to implement AArch64 as well.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes an
	 * entry_point_info_t pointer as argument, construct a dummy one with
	 * PC, state width, endianness, security etc. appropriately set. Other
	 * fields of the entry point structure are irrelevant for this purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = (uint32_t) spsr;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
				EP_EE_LITTLE)
			 | NON_SECURE | EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for the
	 * first time. State switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
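	/*
	 * cm_prepare_el3_exit() then performs the generic EL3 exit preparation
	 * for the Non-secure world, programming the lower ELs' system
	 * registers as required for the new state.
	 */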
	cm_prepare_el3_exit(NON_SECURE);

	/*
	 * State switch success. The caller of the SMC won't see it return;
	 * instead, execution starts at the supplied entry point, with the
	 * cookie values populated in registers 0 and 1.
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
#endif
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}