Unverified Commit 2eedba9a authored by Antonio Niño Díaz, committed by GitHub

Merge pull request #1651 from antonio-nino-diaz-arm/an/rand-misra

Fix some MISRA defects
parents 392b1d59 195e363f
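
The fixes below follow a handful of recurring MISRA C patterns: flag parameters typed as int become bool, implicit truthiness tests become explicit comparisons against 0, 0U or NULL, unsigned values are compared against U-suffixed literals, and void * callbacks return (void *)0 rather than a bare 0. A minimal before/after sketch of the style — the feature_supported/feature_enable names are hypothetical illustrations, not code from this patch:

#include <stdbool.h>
#include <stdint.h>

/* MISRA-leaning style used throughout this patch:
 * bool flags, explicit comparisons, U-suffixed unsigned literals. */
static bool feature_supported(uint64_t idreg)
{
	/* Was: return (idreg & 0xf) == 1;  (signed literal vs. unsigned value) */
	return (idreg & 0xfU) == 1U;
}

static void feature_enable(bool el2_unused, uint64_t idreg)
{
	/* Was: if (feature_supported(idreg) == 0)  (int used as a boolean) */
	if (!feature_supported(idreg))
		return;

	if (el2_unused) {
		/* ...program EL2 trap/enable controls here... */
	}
}

int main(void)
{
	feature_enable(true, 0x1U);
	return 0;
}
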
@@ -14,6 +14,7 @@
 #include <platform.h>
 #include <platform_def.h>
 #include <smccc_helpers.h>
+#include <stdbool.h>
 #include <string.h>
 #include <utils.h>
@@ -129,7 +130,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
  * it is zero.
  ******************************************************************************/
-static void enable_extensions_nonsecure(int el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL32
 #if ENABLE_AMU
@@ -175,7 +176,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t hsctlr, scr;
 	cpu_context_t *ctx = cm_get_context(security_state);
-	int el2_unused = 0;
+	bool el2_unused = false;
 	assert(ctx);
@@ -200,7 +201,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 		isb();
 	} else if (read_id_pfr1() &
 		(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
-		el2_unused = 1;
+		el2_unused = true;
 		/*
 		 * Set the NS bit to access NS copies of certain banked
...
@@ -18,6 +18,7 @@
 #include <pubsub_events.h>
 #include <smccc_helpers.h>
 #include <spe.h>
+#include <stdbool.h>
 #include <string.h>
 #include <sve.h>
 #include <utils.h>
@@ -231,7 +232,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
  * it is zero.
  ******************************************************************************/
-static void enable_extensions_nonsecure(int el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL31
 #if ENABLE_SPE_FOR_LOWER_ELS
@@ -289,7 +290,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t sctlr_elx, scr_el3, mdcr_el2;
 	cpu_context_t *ctx = cm_get_context(security_state);
-	int el2_unused = 0;
+	bool el2_unused = false;
 	uint64_t hcr_el2 = 0;
 	assert(ctx);
@@ -304,7 +305,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 		sctlr_elx |= SCTLR_EL2_RES1;
 		write_sctlr_el2(sctlr_elx);
 	} else if (EL_IMPLEMENTED(2)) {
-		el2_unused = 1;
+		el2_unused = true;
 		/*
 		 * EL2 present but unused, need to disable safely.
...
@@ -10,6 +10,7 @@
 #include <arch_helpers.h>
 #include <platform.h>
 #include <pubsub_events.h>
+#include <stdbool.h>
 #define AMU_GROUP0_NR_COUNTERS	4
@@ -20,17 +21,17 @@ struct amu_ctx {
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
-int amu_supported(void)
+bool amu_supported(void)
 {
 	uint64_t features;
 	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-	return (features & ID_PFR0_AMU_MASK) == 1;
+	return (features & ID_PFR0_AMU_MASK) == 1U;
 }
-void amu_enable(int el2_unused)
+void amu_enable(bool el2_unused)
 {
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return;
 	if (el2_unused) {
@@ -54,8 +55,8 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 	return amu_group0_cnt_read_internal(idx);
 }
@@ -63,8 +64,8 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
@@ -73,8 +74,8 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 	return amu_group1_cnt_read_internal(idx);
 }
@@ -82,8 +83,8 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 	amu_group1_cnt_write_internal(idx, val);
 	isb();
@@ -91,8 +92,8 @@ void amu_group1_cnt_write(int idx, uint64_t val)
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 	amu_group1_set_evtype_internal(idx, val);
 	isb();
@@ -103,7 +104,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx;
 	int i;
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 	ctx = &amu_ctxs[plat_my_core_pos()];
@@ -126,7 +127,7 @@ static void *amu_context_save(const void *arg)
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
 		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-	return 0;
+	return (void *)0;
 }
 static void *amu_context_restore(const void *arg)
@@ -134,13 +135,13 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx;
 	int i;
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 	ctx = &amu_ctxs[plat_my_core_pos()];
 	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);
+	assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
 	/* Restore group 0 counters */
 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
@@ -153,7 +154,7 @@ static void *amu_context_restore(const void *arg)
 	/* Enable group 1 counters */
 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
-	return 0;
+	return (void *)0;
 }
 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
...
@@ -11,6 +11,7 @@
 #include <assert.h>
 #include <platform.h>
 #include <pubsub_events.h>
+#include <stdbool.h>
 #define AMU_GROUP0_NR_COUNTERS	4
@@ -21,23 +22,23 @@ struct amu_ctx {
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
-int amu_supported(void)
+bool amu_supported(void)
 {
 	uint64_t features;
 	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
-	return (features & ID_AA64PFR0_AMU_MASK) == 1;
+	return (features & ID_AA64PFR0_AMU_MASK) == 1U;
 }
 /*
  * Enable counters. This function is meant to be invoked
  * by the context management library before exiting from EL3.
  */
-void amu_enable(int el2_unused)
+void amu_enable(bool el2_unused)
 {
 	uint64_t v;
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return;
 	if (el2_unused) {
@@ -67,8 +68,8 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 	return amu_group0_cnt_read_internal(idx);
 }
@@ -76,8 +77,8 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
 	amu_group0_cnt_write_internal(idx, val);
 	isb();
@@ -86,8 +87,8 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 	return amu_group1_cnt_read_internal(idx);
 }
@@ -95,8 +96,8 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported() != 0);
-	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 	amu_group1_cnt_write_internal(idx, val);
 	isb();
@@ -108,8 +109,8 @@ void amu_group1_cnt_write(int idx, uint64_t val)
  */
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-	assert(amu_supported() != 0);
-	assert (idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+	assert(amu_supported());
+	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
 	amu_group1_set_evtype_internal(idx, val);
 	isb();
@@ -120,14 +121,14 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	int i;
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 	/* Assert that group 0/1 counter configuration is what we expect */
-	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
-	       read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
-	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+	assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
+	       (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));
+	assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
 		<= AMU_GROUP1_NR_COUNTERS);
 	/*
@@ -146,7 +147,7 @@ static void *amu_context_save(const void *arg)
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
 		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
-	return 0;
+	return (void *)0;
 }
 static void *amu_context_restore(const void *arg)
@@ -154,30 +155,30 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	int i;
-	if (amu_supported() == 0)
+	if (!amu_supported())
 		return (void *)-1;
 	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
-	assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+	assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));
+	assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
 		<= AMU_GROUP1_NR_COUNTERS);
 	/* Restore group 0 counters */
 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
-		if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+		if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
			amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	/* Restore group 1 counters */
 	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-		if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
 	/* Restore group 0/1 counter configuration */
 	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
 	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
-	return 0;
+	return (void *)0;
 }
 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
...
@@ -16,7 +16,7 @@ bool mpam_supported(void)
 	return ((features & ID_AA64PFR0_MPAM_MASK) != 0U);
 }
-void mpam_enable(int el2_unused)
+void mpam_enable(bool el2_unused)
 {
 	if (!mpam_supported())
 		return;
@@ -31,7 +31,7 @@ void mpam_enable(int el2_unused)
	 * If EL2 is implemented but unused, disable trapping to EL2 when lower
	 * ELs access their own MPAM registers.
	 */
-	if (el2_unused != 0) {
+	if (el2_unused) {
 		write_mpam2_el2(0);
 		if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
...
@@ -8,26 +8,30 @@
 #include <arch_helpers.h>
 #include <pubsub.h>
 #include <spe.h>
+#include <stdbool.h>
-/*
- * The assembler does not yet understand the psb csync mnemonic
- * so use the equivalent hint instruction.
- */
-#define psb_csync() asm volatile("hint #17")
+static inline void psb_csync(void)
+{
+	/*
+	 * The assembler does not yet understand the psb csync mnemonic
+	 * so use the equivalent hint instruction.
+	 */
+	__asm__ volatile("hint #17");
+}
-int spe_supported(void)
+bool spe_supported(void)
 {
 	uint64_t features;
 	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-	return (features & ID_AA64DFR0_PMS_MASK) == 1;
+	return (features & ID_AA64DFR0_PMS_MASK) == 1U;
 }
-void spe_enable(int el2_unused)
+void spe_enable(bool el2_unused)
 {
 	uint64_t v;
-	if (spe_supported() == 0)
+	if (!spe_supported())
 		return;
 	if (el2_unused) {
@@ -59,7 +63,7 @@ void spe_disable(void)
 {
 	uint64_t v;
-	if (spe_supported() == 0)
+	if (!spe_supported())
 		return;
 	/* Drain buffered data */
@@ -75,13 +79,14 @@ void spe_disable(void)
 static void *spe_drain_buffers_hook(const void *arg)
 {
-	if (spe_supported() == 0)
+	if (!spe_supported())
 		return (void *)-1;
 	/* Drain buffered data */
 	psb_csync();
 	dsbnsh();
-	return 0;
+	return (void *)0;
 }
 SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
@@ -7,21 +7,22 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <pubsub.h>
+#include <stdbool.h>
 #include <sve.h>
-int sve_supported(void)
+bool sve_supported(void)
 {
 	uint64_t features;
 	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-	return (features & ID_AA64PFR0_SVE_MASK) == 1;
+	return (features & ID_AA64PFR0_SVE_MASK) == 1U;
 }
 static void *disable_sve_hook(const void *arg)
 {
 	uint64_t cptr;
-	if (sve_supported() == 0)
+	if (!sve_supported())
 		return (void *)-1;
 	/*
@@ -39,14 +40,14 @@ static void *disable_sve_hook(const void *arg)
	 * No explicit ISB required here as ERET to switch to Secure
	 * world covers it
	 */
-	return 0;
+	return (void *)0;
 }
 static void *enable_sve_hook(const void *arg)
 {
 	uint64_t cptr;
-	if (sve_supported() == 0)
+	if (!sve_supported())
 		return (void *)-1;
 	/*
@@ -60,14 +61,14 @@ static void *enable_sve_hook(const void *arg)
	 * No explicit ISB required here as ERET to switch to Non-secure
	 * world covers it
	 */
-	return 0;
+	return (void *)0;
 }
-void sve_enable(int el2_unused)
+void sve_enable(bool el2_unused)
 {
 	uint64_t cptr;
-	if (sve_supported() == 0)
+	if (!sve_supported())
 		return;
 #if CTX_INCLUDE_FPREGS
...
@@ -26,7 +26,7 @@
 IMPORT_SYM(uintptr_t, __PMF_SVC_DESCS_START__, PMF_SVC_DESCS_START);
 IMPORT_SYM(uintptr_t, __PMF_SVC_DESCS_END__, PMF_SVC_DESCS_END);
 IMPORT_SYM(uintptr_t, __PMF_PERCPU_TIMESTAMP_END__, PMF_PERCPU_TIMESTAMP_END);
-IMPORT_SYM(intptr_t, __PMF_TIMESTAMP_START__, PMF_TIMESTAMP_ARRAY_START);
+IMPORT_SYM(uintptr_t, __PMF_TIMESTAMP_START__, PMF_TIMESTAMP_ARRAY_START);
 #define PMF_PERCPU_TIMESTAMP_SIZE	(PMF_PERCPU_TIMESTAMP_END - PMF_TIMESTAMP_ARRAY_START)
@@ -67,15 +67,15 @@ int pmf_setup(void)
 	pmf_svc_descs = (pmf_svc_desc_t *) PMF_SVC_DESCS_START;
 	for (ii = 0; ii < pmf_svc_descs_num; ii++) {
-		assert(pmf_svc_descs[ii].get_ts);
+		assert(pmf_svc_descs[ii].get_ts != NULL);
 		/*
 		 * Call the initialization routine for this
 		 * PMF service, if it is defined.
 		 */
-		if (pmf_svc_descs[ii].init) {
+		if (pmf_svc_descs[ii].init != NULL) {
 			rc = pmf_svc_descs[ii].init();
-			if (rc) {
+			if (rc != 0) {
 				WARN("Could not initialize PMF"
					"service %s - skipping \n",
					pmf_svc_descs[ii].name);
@@ -125,7 +125,7 @@ static pmf_svc_desc_t *get_service(unsigned int tid)
 	if (pmf_num_services == 0)
 		return NULL;
-	assert(pmf_svc_descs);
+	assert(pmf_svc_descs != NULL);
 	do {
 		mid = (low + high) / 2;
@@ -158,7 +158,7 @@ int pmf_get_timestamp_smc(unsigned int tid,
	unsigned long long *ts_value)
 {
 	pmf_svc_desc_t *svc_desc;
-	assert(ts_value);
+	assert(ts_value != NULL);
 	/* Search for registered service. */
 	svc_desc = get_service(tid);
@@ -247,7 +247,7 @@ unsigned long long __pmf_get_timestamp(uintptr_t base_addr,
 	unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
			tid, cpuid);
-	if (flags & PMF_CACHE_MAINT)
+	if ((flags & PMF_CACHE_MAINT) != 0U)
		inv_dcache_range((uintptr_t)ts_addr, sizeof(unsigned long long));
 	return *ts_addr;
...
@@ -37,7 +37,8 @@ uintptr_t pmf_smc_handler(unsigned int smc_fid,
	 * x0 --> error code.
	 * x1 - x2 --> time-stamp value.
	 */
-		rc = pmf_get_timestamp_smc(x1, x2, x3, &ts_value);
+		rc = pmf_get_timestamp_smc((unsigned int)x1, x2,
+				(unsigned int)x3, &ts_value);
 		SMC_RET3(handle, rc, (uint32_t)ts_value,
			(uint32_t)(ts_value >> 32));
 	}
@@ -49,7 +50,8 @@ uintptr_t pmf_smc_handler(unsigned int smc_fid,
	 * x0 --> error code.
	 * x1 --> time-stamp value.
	 */
-		rc = pmf_get_timestamp_smc(x1, x2, x3, &ts_value);
+		rc = pmf_get_timestamp_smc((unsigned int)x1, x2,
+				(unsigned int)x3, &ts_value);
 		SMC_RET2(handle, rc, ts_value);
 	}
 }
...
@@ -58,7 +58,7 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
 		/* Validate supplied entry point */
 		pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
-		if (arm_validate_ns_entrypoint(pc))
+		if (arm_validate_ns_entrypoint(pc) != 0)
			SMC_RET1(handle, STATE_SW_E_PARAM);
 		/*
...
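
A side note on the SPE change above: the function-like psb_csync() macro becomes a static inline function, the form MISRA C:2012 (Dir 4.9) prefers because the call is type-checked by the compiler. A generic sketch of that conversion, using a hypothetical barrier helper rather than code from this patch:

/* Before: a function-like macro; expands textually, no type checking. */
#define barrier_macro()	__asm__ volatile("" ::: "memory")

/* After: a static inline function; in practice the same codegen, but the
 * call is type-checked and behaves like any other function expression. */
static inline void barrier_fn(void)
{
	__asm__ volatile("" ::: "memory");
}
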