Commit a0fee747 authored by Antonio Nino Diaz

context_mgmt: Fix MISRA defects



The macro EL_IMPLEMENTED() has been deprecated in favour of the new
function el_implemented().

Change-Id: Ic9b1b81480b5e019b50a050e8c1a199991bf0ca9
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
parent 3c1fb7a7
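
For reference, a typical call-site migration (the lines are taken verbatim from the hunks below) looks like this. Call sites that relied on the macro's implicit non-zero test now compare the function's result against EL_IMPL_NONE explicitly; call sites that already compared against EL_IMPL_A64ONLY or EL_IMPL_A64_A32 only swap the macro for the function:

        /* Before: deprecated function-like macro, implicit truthiness test */
        mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;

        /* After: inline function, explicit comparison against EL_IMPL_NONE */
        mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;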
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -49,9 +49,9 @@ void bl1_prepare_next_image(unsigned int image_id)
 	 * Ensure that the build flag to save AArch32 system registers in CPU
 	 * context is not set for AArch64-only platforms.
 	 */
-	if (EL_IMPLEMENTED(1) == EL_IMPL_A64ONLY) {
+	if (el_implemented(1) == EL_IMPL_A64ONLY) {
 		ERROR("EL1 supports AArch64-only. Please set build flag "
-			"CTX_INCLUDE_AARCH32_REGS = 0");
+			"CTX_INCLUDE_AARCH32_REGS = 0\n");
 		panic();
 	}
 #endif
@@ -76,7 +76,7 @@ void bl1_prepare_next_image(unsigned int image_id)
 					DISABLE_ALL_EXCEPTIONS);
 	} else {
 		/* Use EL2 if supported; else use EL1. */
-		if (EL_IMPLEMENTED(2)) {
+		if (el_implemented(2) != EL_IMPL_NONE) {
 			next_bl_ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
 						DISABLE_ALL_EXCEPTIONS);
 		} else {
...
@@ -159,9 +159,9 @@ void __init bl31_prepare_next_image_entry(void)
 	 * Ensure that the build flag to save AArch32 system registers in CPU
 	 * context is not set for AArch64-only platforms.
 	 */
-	if (EL_IMPLEMENTED(1) == EL_IMPL_A64ONLY) {
+	if (el_implemented(1) == EL_IMPL_A64ONLY) {
 		ERROR("EL1 supports AArch64-only. Please set build flag "
-			"CTX_INCLUDE_AARCH32_REGS = 0");
+			"CTX_INCLUDE_AARCH32_REGS = 0\n");
 		panic();
 	}
 #endif
...
@@ -30,7 +30,7 @@
 #define PARAM_EP_SECURITY_MASK		U(0x1)
 
 /* Secure or Non-secure image */
-#define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK)
+#define GET_SECURITY_STATE(x) ((x) & PARAM_EP_SECURITY_MASK)
 #define SET_SECURITY_STATE(x, security) \
 	((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
...
@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __ARCH_HELPERS_H__
-#define __ARCH_HELPERS_H__
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
 
 #include <arch.h>	/* for additional register definitions */
 #include <cdefs.h>
@@ -381,4 +381,4 @@ static inline unsigned int get_current_el(void)
 #define write_icc_sgi0r_el1(_v) \
 		write64_icc_sgi0r_el1(_v)
 
-#endif /* __ARCH_HELPERS_H__ */
+#endif /* ARCH_HELPERS_H */

@@ -4,11 +4,12 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __ARCH_HELPERS_H__
-#define __ARCH_HELPERS_H__
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
 
 #include <arch.h>	/* for additional register definitions */
 #include <cdefs.h>	/* For __dead2 */
+#include <stdbool.h>
 #include <stdint.h>
 #include <string.h>
@@ -363,12 +364,22 @@ static inline unsigned int get_current_el(void)
 }
 
 /*
- * Check if an EL is implemented from AA64PFR0 register fields. 'el' argument
- * must be one of 1, 2 or 3.
+ * Check if an EL is implemented from AA64PFR0 register fields.
  */
-#define EL_IMPLEMENTED(el) \
-	((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL##el##_SHIFT) \
-		& ID_AA64PFR0_ELX_MASK)
+static inline uint64_t el_implemented(unsigned int el)
+{
+	if (el > 3U) {
+		return EL_IMPL_NONE;
+	} else {
+		unsigned int shift = ID_AA64PFR0_EL1_SHIFT * el;
+		return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
+	}
+}
+
+#if !ERROR_DEPRECATED
+#define EL_IMPLEMENTED(_el)	el_implemented(_el)
+#endif
 
 /* Previously defined accesor functions with incomplete register names */
@@ -389,4 +400,4 @@ static inline unsigned int get_current_el(void)
 #define read_cpacr()		read_cpacr_el1()
 #define write_cpacr(_v)		write_cpacr_el1(_v)
 
-#endif /* __ARCH_HELPERS_H__ */
+#endif /* ARCH_HELPERS_H */
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __CONTEXT_H__
-#define __CONTEXT_H__
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <utils_def.h>
 
 /*******************************************************************************
  * Constants that allow assembler code to access members of and the 'regs'
  * structure at their correct offsets.
  ******************************************************************************/
-#define CTX_REGS_OFFSET		0x0
-#define CTX_GPREG_R0		0x0
-#define CTX_GPREG_R1		0x4
-#define CTX_GPREG_R2		0x8
-#define CTX_GPREG_R3		0xC
-#define CTX_LR			0x10
-#define CTX_SCR			0x14
-#define CTX_SPSR		0x18
-#define CTX_NS_SCTLR		0x1C
-#define CTX_REGS_END		0x20
+#define CTX_REGS_OFFSET		U(0x0)
+#define CTX_GPREG_R0		U(0x0)
+#define CTX_GPREG_R1		U(0x4)
+#define CTX_GPREG_R2		U(0x8)
+#define CTX_GPREG_R3		U(0xC)
+#define CTX_LR			U(0x10)
+#define CTX_SCR			U(0x14)
+#define CTX_SPSR		U(0x18)
+#define CTX_NS_SCTLR		U(0x1C)
+#define CTX_REGS_END		U(0x20)
 
 #ifndef __ASSEMBLY__
@@ -31,7 +33,7 @@
  * Common constants to help define the 'cpu_context' structure and its
  * members below.
  */
-#define WORD_SHIFT		2
+#define WORD_SHIFT		U(2)
 #define DEFINE_REG_STRUCT(name, num_regs)	\
 	typedef struct name {			\
 		uint32_t _regs[num_regs];	\
@@ -64,4 +66,4 @@ CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \
 #endif /* __ASSEMBLY__ */
 
-#endif /* __CONTEXT_H__ */
+#endif /* CONTEXT_H */

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __CONTEXT_H__
-#define __CONTEXT_H__
+#ifndef CONTEXT_H
+#define CONTEXT_H
 
 #include <utils_def.h>
@@ -347,4 +347,4 @@ void fpregs_context_restore(fp_regs_t *regs);
 #endif /* __ASSEMBLY__ */
 
-#endif /* __CONTEXT_H__ */
+#endif /* CONTEXT_H */
 /*
- * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#ifndef __CM_H__
-#define __CM_H__
+#ifndef CONTEXT_MGMT_H
+#define CONTEXT_MGMT_H
 
 #include <arch.h>
 #include <assert.h>
 #include <context.h>
-#include <context_mgmt.h>
 #include <stdint.h>
 
 /*******************************************************************************
@@ -80,4 +79,4 @@ void *cm_get_next_context(void);
 void cm_set_next_context(void *context);
 
 #endif /* AARCH32 */
 
-#endif /* __CM_H__ */
+#endif /* CONTEXT_MGMT_H */

@@ -144,9 +144,9 @@ void init_cpu_data_ptr(void);
 void init_cpu_ops(void);
 
 #define get_cpu_data(_m)		   _cpu_data()->_m
-#define set_cpu_data(_m, _v)		   _cpu_data()->_m = _v
+#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
 #define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
-#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
 /* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
 #define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
 						&(_cpu_data()->_m), \
...
@@ -57,7 +57,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	uint32_t scr, sctlr;
 	regs_t *reg_ctx;
 
-	assert(ctx);
+	assert(ctx != NULL);
 
 	security_state = GET_SECURITY_STATE(ep->h.attr);
@@ -97,7 +97,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
 			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));
 
-		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
 		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
 		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
 	}
@@ -178,11 +178,11 @@ void cm_prepare_el3_exit(uint32_t security_state)
 	cpu_context_t *ctx = cm_get_context(security_state);
 	bool el2_unused = false;
 
-	assert(ctx);
+	assert(ctx != NULL);
 
 	if (security_state == NON_SECURE) {
 		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
-		if (scr & SCR_HCE_BIT) {
+		if ((scr & SCR_HCE_BIT) != 0U) {
 			/* Use SCTLR value to initialize HSCTLR */
 			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
 					      CTX_NS_SCTLR);
@@ -199,8 +199,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			write_scr(read_scr() & ~SCR_NS_BIT);
 			isb();
-		} else if (read_id_pfr1() &
-			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+		} else if ((read_id_pfr1() &
+			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
 			el2_unused = true;
 
 			/*
...
@@ -68,7 +68,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	gp_regs_t *gp_regs;
 	unsigned long sctlr_elx, actlr_elx;
 
-	assert(ctx);
+	assert(ctx != NULL);
 
 	security_state = GET_SECURITY_STATE(ep->h.attr);
@@ -84,7 +84,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	 * the required value depending on the state of the SPSR_EL3 and the
 	 * Security state and entrypoint attributes of the next EL.
 	 */
-	scr_el3 = read_scr();
+	scr_el3 = (uint32_t)read_scr();
 	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
 			SCR_ST_BIT | SCR_HCE_BIT);
 
 	/*
@@ -103,7 +103,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	 * Secure timer registers to EL3, from AArch64 state only, if specified
 	 * by the entrypoint attributes.
 	 */
-	if (EP_GET_ST(ep->h.attr))
+	if (EP_GET_ST(ep->h.attr) != 0U)
 		scr_el3 |= SCR_ST_BIT;
 
 #if !HANDLE_EA_EL3_FIRST
@@ -133,10 +133,9 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
 	 * next mode is Hyp.
 	 */
-	if ((GET_RW(ep->spsr) == MODE_RW_64
-	    && GET_EL(ep->spsr) == MODE_EL2)
-	    || (GET_RW(ep->spsr) != MODE_RW_64
-	    && GET_M32(ep->spsr) == MODE32_hyp)) {
+	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
+	    || ((GET_RW(ep->spsr) != MODE_RW_64)
+		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
 		scr_el3 |= SCR_HCE_BIT;
 	}
@@ -151,7 +150,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
 	 * required by PSCI specification)
 	 */
-	sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
 	if (GET_RW(ep->spsr) == MODE_RW_64)
 		sctlr_elx |= SCTLR_EL1_RES1;
 	else {
@@ -291,20 +290,21 @@ void cm_prepare_el3_exit(uint32_t security_state)
 	uint32_t sctlr_elx, scr_el3, mdcr_el2;
 	cpu_context_t *ctx = cm_get_context(security_state);
 	bool el2_unused = false;
-	uint64_t hcr_el2 = 0;
+	uint64_t hcr_el2 = 0U;
 
-	assert(ctx);
+	assert(ctx != NULL);
 
 	if (security_state == NON_SECURE) {
-		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
-		if (scr_el3 & SCR_HCE_BIT) {
+		scr_el3 = (uint32_t)read_ctx_reg(get_el3state_ctx(ctx),
+						 CTX_SCR_EL3);
+		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
 			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
-			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
-						 CTX_SCTLR_EL1);
+			sctlr_elx = (uint32_t)read_ctx_reg(get_sysregs_ctx(ctx),
+							   CTX_SCTLR_EL1);
 			sctlr_elx &= SCTLR_EE_BIT;
 			sctlr_elx |= SCTLR_EL2_RES1;
 			write_sctlr_el2(sctlr_elx);
-		} else if (EL_IMPLEMENTED(2)) {
+		} else if (el_implemented(2) != EL_IMPL_NONE) {
 			el2_unused = true;
 
 			/*
@@ -314,7 +314,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			 * Set EL2 register width appropriately: Set HCR_EL2
 			 * field to match SCR_EL3.RW.
 			 */
-			if (scr_el3 & SCR_RW_BIT)
+			if ((scr_el3 & SCR_RW_BIT) != 0U)
 				hcr_el2 |= HCR_RW_BIT;
 
 			/*
@@ -470,7 +470,7 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
 	cpu_context_t *ctx;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	el1_sysregs_context_save(get_sysregs_ctx(ctx));
@@ -487,7 +487,7 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
 	cpu_context_t *ctx;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
@@ -509,7 +509,7 @@ void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
 	el3_state_t *state;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	/* Populate EL3 state so that ERET jumps to the correct entry */
 	state = get_el3state_ctx(ctx);
@@ -527,7 +527,7 @@ void cm_set_elr_spsr_el3(uint32_t security_state,
 	el3_state_t *state;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	/* Populate EL3 state so that ERET jumps to the correct entry */
 	state = get_el3state_ctx(ctx);
@@ -549,21 +549,21 @@ void cm_write_scr_el3_bit(uint32_t security_state,
 	uint32_t scr_el3;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	/* Ensure that the bit position is a valid one */
-	assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
+	assert(((1U << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
 
 	/* Ensure that the 'value' is only a bit wide */
-	assert(value <= 1);
+	assert(value <= 1U);
 
 	/*
 	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
 	 * and set it to its new value.
 	 */
 	state = get_el3state_ctx(ctx);
-	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
-	scr_el3 &= ~(1 << bit_pos);
+	scr_el3 = (uint32_t)read_ctx_reg(state, CTX_SCR_EL3);
+	scr_el3 &= ~(1U << bit_pos);
 	scr_el3 |= value << bit_pos;
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 }
@@ -578,11 +578,11 @@ uint32_t cm_get_scr_el3(uint32_t security_state)
 	el3_state_t *state;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	/* Populate EL3 state so that ERET jumps to the correct entry */
 	state = get_el3state_ctx(ctx);
-	return read_ctx_reg(state, CTX_SCR_EL3);
+	return (uint32_t)read_ctx_reg(state, CTX_SCR_EL3);
 }
 
 /*******************************************************************************
@@ -595,7 +595,7 @@ void cm_set_next_eret_context(uint32_t security_state)
 	cpu_context_t *ctx;
 
 	ctx = cm_get_context(security_state);
-	assert(ctx);
+	assert(ctx != NULL);
 
 	cm_set_next_context(ctx);
 }
@@ -63,7 +63,7 @@ uint32_t arm_get_spsr_for_bl33_entry(void)
 	uint32_t spsr;
 
 	/* Figure out what mode we enter the non-secure world in */
-	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
 
 	/*
 	 * TODO: Consider the possibility of specifying the SPSR in
...
@@ -117,7 +117,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * Switching from AArch64 to AArch32. Ensure this CPU implements
 		 * the target EL in AArch32.
 		 */
-		impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
+		impl = from_el2 ? el_implemented(2) : el_implemented(1);
 		if (impl != EL_IMPL_A64_A32)
 			goto exec_denied;
...
@@ -99,7 +99,7 @@ uint32_t hikey_get_spsr_for_bl33_entry(void)
 	uint32_t spsr;
 
 	/* Figure out what mode we enter the non-secure world in */
-	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
 
 	/*
 	 * TODO: Consider the possibility of specifying the SPSR in
...
@@ -191,7 +191,7 @@ uint32_t hikey960_get_spsr_for_bl33_entry(void)
 	uint32_t spsr;
 
 	/* Figure out what mode we enter the non-secure world in */
-	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
 
 	/*
 	 * TODO: Consider the possibility of specifying the SPSR in
...
@@ -147,7 +147,7 @@ uint32_t ls_get_spsr_for_bl33_entry(void)
 	uint32_t spsr;
 
 	/* Figure out what mode we enter the non-secure world in */
-	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
 
 	/*
 	 * TODO: Consider the possibility of specifying the SPSR in
...
@@ -339,7 +339,7 @@ static entry_point_info_t *bl31_plat_get_next_kernel64_ep_info(void)
 	next_image_info = &bl33_image_ep_info;
 
 	/* Figure out what mode we enter the non-secure world in */
-	if (EL_IMPLEMENTED(2)) {
+	if (el_implemented(2) != EL_IMPL_NONE) {
 		INFO("Kernel_EL2\n");
 		mode = MODE_EL2;
 	} else{
...
 /*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -118,7 +118,7 @@ static uint32_t qemu_get_spsr_for_bl33_entry(void)
 	unsigned int mode;
 
 	/* Figure out what mode we enter the non-secure world in */
-	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
+	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
 
 	/*
 	 * TODO: Consider the possibility of specifying the SPSR in
...