Commit b634fa91 authored by Jeenu Viswambharan
Browse files

SiP: MISRA fixes for execution state switch


These changes address most of the required MISRA rules. In the process,
some issues in generic code are also fixed.

No functional changes.

Change-Id: I707dbec9b34b802397e99da2f5ae738165d6feba
Signed-off-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
parent 30a8d96e
Showing with 24 additions and 19 deletions
+24 -19
/* /*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \ #define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
(_p)->h.type = (uint8_t)(_type); \ (_p)->h.type = (uint8_t)(_type); \
(_p)->h.version = (uint8_t)(_ver); \ (_p)->h.version = (uint8_t)(_ver); \
(_p)->h.size = (uint16_t)sizeof(*_p); \ (_p)->h.size = (uint16_t)sizeof(*(_p)); \
(_p)->h.attr = (uint32_t)(_attr) ; \ (_p)->h.attr = (uint32_t)(_attr) ; \
} while (0) } while (0)
......
/* /*
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
...@@ -64,9 +64,9 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid, ...@@ -64,9 +64,9 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
/* /*
* Pointers used in execution state switch are all 32 bits wide * Pointers used in execution state switch are all 32 bits wide
*/ */
return arm_execution_state_switch(smc_fid, (uint32_t) x1, return (uintptr_t) arm_execution_state_switch(smc_fid,
(uint32_t) x2, (uint32_t) x3, (uint32_t) x4, (uint32_t) x1, (uint32_t) x2, (uint32_t) x3,
handle); (uint32_t) x4, handle);
} }
case ARM_SIP_SVC_CALL_COUNT: case ARM_SIP_SVC_CALL_COUNT:
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <plat_arm.h> #include <plat_arm.h>
#include <psci.h> #include <psci.h>
#include <smccc_helpers.h> #include <smccc_helpers.h>
#include <stdbool.h>
#include <string.h> #include <string.h>
#include <utils.h> #include <utils.h>
...@@ -39,7 +40,8 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -39,7 +40,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
{ {
/* Execution state can be switched only if EL3 is AArch64 */ /* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64 #ifdef AARCH64
int caller_64, from_el2, el, endianness, thumb = 0; bool caller_64, thumb = false, from_el2;
unsigned int el, endianness;
u_register_t spsr, pc, scr, sctlr; u_register_t spsr, pc, scr, sctlr;
entry_point_info_t ep; entry_point_info_t ep;
cpu_context_t *ctx = (cpu_context_t *) handle; cpu_context_t *ctx = (cpu_context_t *) handle;
...@@ -50,7 +52,7 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -50,7 +52,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
/* /*
* Disallow state switch if any of the secondaries have been brought up. * Disallow state switch if any of the secondaries have been brought up.
*/ */
if (psci_secondaries_brought_up()) if (psci_secondaries_brought_up() != 0)
goto exec_denied; goto exec_denied;
spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3); spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
...@@ -61,20 +63,20 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -61,20 +63,20 @@ int arm_execution_state_switch(unsigned int smc_fid,
* If the call originated from AArch64, expect 32-bit pointers when * If the call originated from AArch64, expect 32-bit pointers when
* switching to AArch32. * switching to AArch32.
*/ */
if ((pc_hi != 0) || (cookie_hi != 0)) if ((pc_hi != 0U) || (cookie_hi != 0U))
goto invalid_param; goto invalid_param;
pc = pc_lo; pc = pc_lo;
/* Instruction state when entering AArch32 */ /* Instruction state when entering AArch32 */
thumb = pc & 1; thumb = (pc & 1U) != 0U;
} else { } else {
/* Construct AArch64 PC */ /* Construct AArch64 PC */
pc = (((u_register_t) pc_hi) << 32) | pc_lo; pc = (((u_register_t) pc_hi) << 32) | pc_lo;
} }
/* Make sure PC is 4-byte aligned, except for Thumb */ /* Make sure PC is 4-byte aligned, except for Thumb */
if ((pc & 0x3) && !thumb) if (((pc & 0x3U) != 0U) && !thumb)
goto invalid_param; goto invalid_param;
/* /*
...@@ -95,7 +97,7 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -95,7 +97,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
* Disallow switching state if there's a Hypervisor in place; * Disallow switching state if there's a Hypervisor in place;
* this request must be taken up with the Hypervisor instead. * this request must be taken up with the Hypervisor instead.
*/ */
if (scr & SCR_HCE_BIT) if ((scr & SCR_HCE_BIT) != 0U)
goto exec_denied; goto exec_denied;
} }
...@@ -105,11 +107,11 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -105,11 +107,11 @@ int arm_execution_state_switch(unsigned int smc_fid,
* directly. * directly.
*/ */
sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1(); sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
endianness = !!(sctlr & SCTLR_EE_BIT); endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;
/* Construct SPSR for the exception state we're about to switch to */ /* Construct SPSR for the exception state we're about to switch to */
if (caller_64) { if (caller_64) {
int impl; unsigned long long impl;
/* /*
* Switching from AArch64 to AArch32. Ensure this CPU implements * Switching from AArch64 to AArch32. Ensure this CPU implements
...@@ -121,7 +123,8 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -121,7 +123,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
/* Return to the equivalent AArch32 privilege level */ /* Return to the equivalent AArch32 privilege level */
el = from_el2 ? MODE32_hyp : MODE32_svc; el = from_el2 ? MODE32_hyp : MODE32_svc;
spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM, spsr = SPSR_MODE32((u_register_t) el,
thumb ? SPSR_T_THUMB : SPSR_T_ARM,
endianness, DISABLE_ALL_EXCEPTIONS); endianness, DISABLE_ALL_EXCEPTIONS);
} else { } else {
/* /*
...@@ -130,7 +133,8 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -130,7 +133,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
* raised), it's safe to assume AArch64 is also implemented. * raised), it's safe to assume AArch64 is also implemented.
*/ */
el = from_el2 ? MODE_EL2 : MODE_EL1; el = from_el2 ? MODE_EL2 : MODE_EL1;
spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
} }
/* /*
...@@ -143,10 +147,11 @@ int arm_execution_state_switch(unsigned int smc_fid, ...@@ -143,10 +147,11 @@ int arm_execution_state_switch(unsigned int smc_fid,
*/ */
zeromem(&ep, sizeof(ep)); zeromem(&ep, sizeof(ep));
ep.pc = pc; ep.pc = pc;
ep.spsr = spsr; ep.spsr = (uint32_t) spsr;
SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE | ((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
EP_ST_DISABLE)); EP_EE_LITTLE)
| NON_SECURE | EP_ST_DISABLE));
/* /*
* Re-initialize the system register context, and exit EL3 as if for the * Re-initialize the system register context, and exit EL3 as if for the
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment