Commit b3385aa0 authored by Mark Dykes, committed by TrustedFirmware Code Review

Merge "TF-A AMU extension: fix detection of group 1 counters." into integration

parents c6213c7e f3ccf036
@@ -562,21 +562,14 @@
 behaviour of the ``assert()`` function (for example, to save memory).
 doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
 defined, it defaults to ``LOG_LEVEL``.
 
-If the platform port uses the Activity Monitor Unit, the following constants
+If the platform port uses the Activity Monitor Unit, the following constant
 may be defined:
 
 - **PLAT_AMU_GROUP1_COUNTERS_MASK**
   This mask reflects the set of group counters that should be enabled. The
   maximum number of group 1 counters supported by AMUv1 is 16 so the mask
   can be at most 0xffff. If the platform does not define this mask, no group 1
-  counters are enabled. If the platform defines this mask, the following
-  constant needs to also be defined.
-
-- **PLAT_AMU_GROUP1_NR_COUNTERS**
-  This value is used to allocate an array to save and restore the counters
-  specified by ``PLAT_AMU_GROUP1_COUNTERS_MASK`` on CPU suspend.
-  This value should be equal to the highest bit position set in the
-  mask, plus 1. The maximum number of group 1 counters in AMUv1 is 16.
+  counters are enabled.
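As an illustration of the new scheme (hypothetical values, not part of this
change): a platform that wants the first three group 1 counters enabled would
now define only the mask in its ``platform_def.h``; the number of counters to
save and restore on suspend is derived at build time from the highest bit set
in the mask::

    /* Hypothetical platform_def.h fragment (values chosen for the example) */

    /* Enable AMU group 1 counters 0, 1 and 2 */
    #define PLAT_AMU_GROUP1_COUNTERS_MASK   U(0x7)

    /*
     * PLAT_AMU_GROUP1_NR_COUNTERS is no longer required: amu.h now derives
     * AMU_GROUP1_NR_COUNTERS (3 in this example) from the mask itself.
     */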
 File : plat_macros.S [mandatory]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -701,6 +701,16 @@
 #define AMEVTYPER1E             p15, 0, c13, c15, 6
 #define AMEVTYPER1F             p15, 0, c13, c15, 7
 
+/* AMCFGR definitions */
+#define AMCFGR_NCG_SHIFT        U(28)
+#define AMCFGR_NCG_MASK         U(0xf)
+#define AMCFGR_N_SHIFT          U(0)
+#define AMCFGR_N_MASK           U(0xff)
+
+/* AMCGCR definitions */
+#define AMCGCR_CG1NC_SHIFT      U(8)
+#define AMCGCR_CG1NC_MASK       U(0xff)
+
 /*******************************************************************************
  * Definitions for DynamicIQ Shared Unit registers
  ******************************************************************************/
@@ -300,11 +300,16 @@
 DEFINE_COPROCR_RW_FUNCS(prrr, PRRR)
 DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR)
 DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
 
+/* Coproc registers for 32bit AMU support */
+DEFINE_COPROCR_READ_FUNC(amcfgr, AMCFGR)
+DEFINE_COPROCR_READ_FUNC(amcgcr, AMCGCR)
+
 DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
 DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
 DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
 DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
 
+/* Coproc registers for 64bit AMU support */
 DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
 DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
 DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
@@ -898,9 +898,14 @@
 #define AMEVTYPER1E_EL0         S3_3_C13_C15_6
 #define AMEVTYPER1F_EL0         S3_3_C13_C15_7
 
+/* AMCFGR_EL0 definitions */
+#define AMCFGR_EL0_NCG_SHIFT    U(28)
+#define AMCFGR_EL0_NCG_MASK     U(0xf)
+#define AMCFGR_EL0_N_SHIFT      U(0)
+#define AMCFGR_EL0_N_MASK       U(0xff)
+
 /* AMCGCR_EL0 definitions */
 #define AMCGCR_EL0_CG1NC_SHIFT  U(8)
-#define AMCGCR_EL0_CG1NC_LENGTH U(8)
 #define AMCGCR_EL0_CG1NC_MASK   U(0xff)
 
 /* MPAM register definitions */
@@ -482,7 +482,8 @@
 DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
-DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcgcr_el0, AMCGCR_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,13 +10,14 @@
 #include <stdbool.h>
 #include <stdint.h>
 
+#include <platform_def.h>
+
 #include <lib/cassert.h>
 #include <lib/utils_def.h>
-#include <platform_def.h>
 
 /* All group 0 counters */
 #define AMU_GROUP0_COUNTERS_MASK        U(0xf)
+#define AMU_GROUP0_NR_COUNTERS          U(4)
 
 #ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
 #define AMU_GROUP1_COUNTERS_MASK        PLAT_AMU_GROUP1_COUNTERS_MASK
@@ -24,25 +25,67 @@
 #define AMU_GROUP1_COUNTERS_MASK        U(0)
 #endif
 
-#ifdef PLAT_AMU_GROUP1_NR_COUNTERS
-#define AMU_GROUP1_NR_COUNTERS          PLAT_AMU_GROUP1_NR_COUNTERS
+/* Calculate number of group 1 counters */
+#if (AMU_GROUP1_COUNTERS_MASK & (1 << 15))
+#define AMU_GROUP1_NR_COUNTERS          16U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 14))
+#define AMU_GROUP1_NR_COUNTERS          15U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 13))
+#define AMU_GROUP1_NR_COUNTERS          14U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 12))
+#define AMU_GROUP1_NR_COUNTERS          13U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 11))
+#define AMU_GROUP1_NR_COUNTERS          12U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 10))
+#define AMU_GROUP1_NR_COUNTERS          11U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 9))
+#define AMU_GROUP1_NR_COUNTERS          10U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 8))
+#define AMU_GROUP1_NR_COUNTERS          9U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 7))
+#define AMU_GROUP1_NR_COUNTERS          8U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 6))
+#define AMU_GROUP1_NR_COUNTERS          7U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 5))
+#define AMU_GROUP1_NR_COUNTERS          6U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 4))
+#define AMU_GROUP1_NR_COUNTERS          5U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 3))
+#define AMU_GROUP1_NR_COUNTERS          4U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 2))
+#define AMU_GROUP1_NR_COUNTERS          3U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 1))
+#define AMU_GROUP1_NR_COUNTERS          2U
+#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 0))
+#define AMU_GROUP1_NR_COUNTERS          1U
 #else
-#define AMU_GROUP1_NR_COUNTERS          U(0)
+#define AMU_GROUP1_NR_COUNTERS          0U
 #endif
 
 CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
-CASSERT(AMU_GROUP1_NR_COUNTERS <= 16, invalid_amu_group1_nr_counters);
+struct amu_ctx {
+        uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+#if AMU_GROUP1_NR_COUNTERS
+        uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
+#endif
+};
 
 bool amu_supported(void);
 void amu_enable(bool el2_unused);
 
 /* Group 0 configuration helpers */
-uint64_t amu_group0_cnt_read(int idx);
-void amu_group0_cnt_write(int idx, uint64_t val);
+uint64_t amu_group0_cnt_read(unsigned int idx);
+void amu_group0_cnt_write(unsigned int idx, uint64_t val);
+
+#if AMU_GROUP1_NR_COUNTERS
+bool amu_group1_supported(void);
 
 /* Group 1 configuration helpers */
-uint64_t amu_group1_cnt_read(int idx);
-void amu_group1_cnt_write(int idx, uint64_t val);
-void amu_group1_set_evtype(int idx, unsigned int val);
+uint64_t amu_group1_cnt_read(unsigned int idx);
+void amu_group1_cnt_write(unsigned int idx, uint64_t val);
+void amu_group1_set_evtype(unsigned int idx, unsigned int val);
+#endif
 
 #endif /* AMU_H */
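The #elif ladder above computes, at preprocessor time, the position of the
highest bit set in AMU_GROUP1_COUNTERS_MASK plus one, which is what sizes
group1_cnts[] in struct amu_ctx. As a minimal illustration (plain C written
for this note, not code from the patch), the same calculation at run time
would be:

        #include <stdint.h>

        /* Highest set bit position plus one; 0 for an empty mask. */
        static unsigned int group1_nr_counters(uint32_t mask)
        {
                unsigned int n = 0U;

                while (mask != 0U) {
                        mask >>= 1;
                        n++;
                }

                return n;
        }

For example, a PLAT_AMU_GROUP1_COUNTERS_MASK of 0x133 (counters 0, 1, 4, 5
and 8) yields AMU_GROUP1_NR_COUNTERS == 9, so group1_cnts[] covers the
highest selected counter even though only the five selected counters are
actually saved and restored.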
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,11 +9,11 @@
 #include <stdint.h>
 
-uint64_t amu_group0_cnt_read_internal(int idx);
-void amu_group0_cnt_write_internal(int idx, uint64_t val);
+uint64_t amu_group0_cnt_read_internal(unsigned int idx);
+void amu_group0_cnt_write_internal(unsigned int idx, uint64_t val);
 
-uint64_t amu_group1_cnt_read_internal(int idx);
-void amu_group1_cnt_write_internal(int idx, uint64_t val);
-void amu_group1_set_evtype_internal(int idx, unsigned int val);
+uint64_t amu_group1_cnt_read_internal(unsigned int idx);
+void amu_group1_cnt_write_internal(unsigned int idx, uint64_t val);
+void amu_group1_set_evtype_internal(unsigned int idx, unsigned int val);
 
 #endif /* AMU_PRIVATE_H */
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
 #include <stdbool.h>
 
 #include <arch.h>
 #include <arch_helpers.h>
 #include <lib/el3_runtime/pubsub_events.h>
 #include <lib/extensions/amu.h>
 #include <lib/extensions/amu_private.h>
-#include <plat/common/platform.h>
 
-#define AMU_GROUP0_NR_COUNTERS  4
-
-struct amu_ctx {
-        uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
-        uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-};
+#include <plat/common/platform.h>
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
+/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
 bool amu_supported(void)
 {
-        uint64_t features;
-
-        features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-        return (features & ID_PFR0_AMU_MASK) == 1U;
+        uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+
+        features &= ID_PFR0_AMU_MASK;
+        return ((features == 1U) || (features == 2U));
 }
 
+#if AMU_GROUP1_NR_COUNTERS
+/* Check if group 1 counters is implemented */
+bool amu_group1_supported(void)
+{
+        uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
+
+        return (features & AMCFGR_NCG_MASK) == 1U;
+}
+#endif
+
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
 void amu_enable(bool el2_unused)
 {
-        if (!amu_supported())
+        if (!amu_supported()) {
+                INFO("AMU is not implemented\n");
                 return;
+        }
+
+#if AMU_GROUP1_NR_COUNTERS
+        /* Check and set presence of group 1 counters */
+        if (!amu_group1_supported()) {
+                ERROR("AMU Counter Group 1 is not implemented\n");
+                panic();
+        }
+
+        /* Check number of group 1 counters */
+        uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
+                                AMCGCR_CG1NC_MASK;
+
+        VERBOSE("%s%u. %s%u\n",
+                "Number of AMU Group 1 Counters ", cnt_num,
+                "Requested number ", AMU_GROUP1_NR_COUNTERS);
+
+        if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
+                ERROR("%s%u is less than %s%u\n",
+                        "Number of AMU Group 1 Counters ", cnt_num,
+                        "Requested number ", AMU_GROUP1_NR_COUNTERS);
+                panic();
+        }
+#endif
+
         if (el2_unused) {
                 uint64_t v;
@@ -49,112 +84,156 @@ void amu_enable(bool el2_unused)
 
         /* Enable group 0 counters */
         write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
         /* Enable group 1 counters */
         write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#endif
 }
 
 /* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(int idx)
+uint64_t amu_group0_cnt_read(unsigned int idx)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+        assert(idx < AMU_GROUP0_NR_COUNTERS);
 
         return amu_group0_cnt_read_internal(idx);
 }
 
-/* Write the group 0 counter identified by the given `idx` with `val`. */
-void amu_group0_cnt_write(int idx, uint64_t val)
+/* Write the group 0 counter identified by the given `idx` with `val` */
+void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+        assert(idx < AMU_GROUP0_NR_COUNTERS);
 
         amu_group0_cnt_write_internal(idx, val);
         isb();
 }
 
-/* Read the group 1 counter identified by the given `idx`. */
-uint64_t amu_group1_cnt_read(int idx)
+#if AMU_GROUP1_NR_COUNTERS
+/* Read the group 1 counter identified by the given `idx` */
+uint64_t amu_group1_cnt_read(unsigned int idx)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+        assert(amu_group1_supported());
+        assert(idx < AMU_GROUP1_NR_COUNTERS);
 
         return amu_group1_cnt_read_internal(idx);
 }
 
-/* Write the group 1 counter identified by the given `idx` with `val`. */
-void amu_group1_cnt_write(int idx, uint64_t val)
+/* Write the group 1 counter identified by the given `idx` with `val` */
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+        assert(amu_group1_supported());
+        assert(idx < AMU_GROUP1_NR_COUNTERS);
 
         amu_group1_cnt_write_internal(idx, val);
         isb();
 }
 
-void amu_group1_set_evtype(int idx, unsigned int val)
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`
+ */
+void amu_group1_set_evtype(unsigned int idx, unsigned int val)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+        assert(amu_group1_supported());
+        assert(idx < AMU_GROUP1_NR_COUNTERS);
 
         amu_group1_set_evtype_internal(idx, val);
         isb();
 }
+#endif  /* AMU_GROUP1_NR_COUNTERS */
 static void *amu_context_save(const void *arg)
 {
-        struct amu_ctx *ctx;
-        int i;
+        struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+        unsigned int i;
 
-        if (!amu_supported())
+        if (!amu_supported()) {
                 return (void *)-1;
+        }
 
-        ctx = &amu_ctxs[plat_my_core_pos()];
-
-        /* Assert that group 0 counter configuration is what we expect */
-        assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
-               read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
+#if AMU_GROUP1_NR_COUNTERS
+        if (!amu_group1_supported()) {
+                return (void *)-1;
+        }
+#endif
+        /* Assert that group 0/1 counter configuration is what we expect */
+        assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
+        assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+#endif
         /*
-         * Disable group 0 counters to avoid other observers like SCP sampling
+         * Disable group 0/1 counters to avoid other observers like SCP sampling
          * counter values from the future via the memory mapped view.
         */
         write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
         write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
+#endif
         isb();
 
-        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+        /* Save all group 0 counters */
+        for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
                 ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+        }
 
-        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-                ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+#if AMU_GROUP1_NR_COUNTERS
+        /* Save group 1 counters */
+        for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+                        ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+                }
+        }
+#endif
         return (void *)0;
 }
 static void *amu_context_restore(const void *arg)
 {
-        struct amu_ctx *ctx;
-        int i;
+        struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+        unsigned int i;
 
-        if (!amu_supported())
+        if (!amu_supported()) {
                 return (void *)-1;
+        }
 
-        ctx = &amu_ctxs[plat_my_core_pos()];
-
+#if AMU_GROUP1_NR_COUNTERS
+        if (!amu_group1_supported()) {
+                return (void *)-1;
+        }
+#endif
        /* Counters were disabled in `amu_context_save()` */
-        assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
+        assert(read_amcntenset0_el0() == 0U);
+
+#if AMU_GROUP1_NR_COUNTERS
+        assert(read_amcntenset1_el0() == 0U);
+#endif
 
-        /* Restore group 0 counters */
-        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+        /* Restore all group 0 counters */
+        for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
                 amu_group0_cnt_write(i, ctx->group0_cnts[i]);
-        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-                amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+        }
 
-        /* Enable group 0 counters */
+        /* Restore group 0 counter configuration */
         write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
 
-        /* Enable group 1 counters */
+#if AMU_GROUP1_NR_COUNTERS
+        /* Restore group 1 counters */
+        for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+                        amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+                }
+        }
+
+        /* Restore group 1 counter configuration */
         write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
+#endif
 
         return (void *)0;
 }
 /*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,38 +9,68 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <lib/el3_runtime/pubsub_events.h>
 #include <lib/extensions/amu.h>
 #include <lib/extensions/amu_private.h>
-#include <plat/common/platform.h>
 
-#define AMU_GROUP0_NR_COUNTERS  4
-
-struct amu_ctx {
-        uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
-        uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
-};
+#include <plat/common/platform.h>
 
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
 
+/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
 bool amu_supported(void)
 {
-        uint64_t features;
-
-        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
-        return (features & ID_AA64PFR0_AMU_MASK) == 1U;
+        uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
+
+        features &= ID_AA64PFR0_AMU_MASK;
+        return ((features == 1U) || (features == 2U));
+}
+
+#if AMU_GROUP1_NR_COUNTERS
+/* Check if group 1 counters is implemented */
+bool amu_group1_supported(void)
+{
+        uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
+
+        return (features & AMCFGR_EL0_NCG_MASK) == 1U;
 }
+#endif
 /*
  * Enable counters. This function is meant to be invoked
  * by the context management library before exiting from EL3.
  */
 void amu_enable(bool el2_unused)
 {
         uint64_t v;
 
-        if (!amu_supported())
+        if (!amu_supported()) {
+                INFO("AMU is not implemented\n");
                 return;
+        }
+
+#if AMU_GROUP1_NR_COUNTERS
+        /* Check and set presence of group 1 counters */
+        if (!amu_group1_supported()) {
+                ERROR("AMU Counter Group 1 is not implemented\n");
+                panic();
+        }
+
+        /* Check number of group 1 counters */
+        uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
+                                AMCGCR_EL0_CG1NC_MASK;
+
+        VERBOSE("%s%llu. %s%u\n",
+                "Number of AMU Group 1 Counters ", cnt_num,
+                "Requested number ", AMU_GROUP1_NR_COUNTERS);
+
+        if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
+                ERROR("%s%llu is less than %s%u\n",
+                        "Number of AMU Group 1 Counters ", cnt_num,
+                        "Requested number ", AMU_GROUP1_NR_COUNTERS);
+                panic();
+        }
+#endif
 
         if (el2_unused) {
                 /*
@@ -62,43 +92,49 @@ void amu_enable(bool el2_unused)
 
         /* Enable group 0 counters */
         write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
         /* Enable group 1 counters */
         write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
 }
 
 /* Read the group 0 counter identified by the given `idx`. */
-uint64_t amu_group0_cnt_read(int idx)
+uint64_t amu_group0_cnt_read(unsigned int idx)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+        assert(idx < AMU_GROUP0_NR_COUNTERS);
 
         return amu_group0_cnt_read_internal(idx);
 }
 
-/* Write the group 0 counter identified by the given `idx` with `val`. */
-void amu_group0_cnt_write(int idx, uint64_t val)
+/* Write the group 0 counter identified by the given `idx` with `val` */
+void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
+        assert(idx < AMU_GROUP0_NR_COUNTERS);
 
         amu_group0_cnt_write_internal(idx, val);
         isb();
 }
 
-/* Read the group 1 counter identified by the given `idx`. */
-uint64_t amu_group1_cnt_read(int idx)
+#if AMU_GROUP1_NR_COUNTERS
+/* Read the group 1 counter identified by the given `idx` */
+uint64_t amu_group1_cnt_read(unsigned int idx)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+        assert(amu_group1_supported());
+        assert(idx < AMU_GROUP1_NR_COUNTERS);
 
         return amu_group1_cnt_read_internal(idx);
 }
 
-/* Write the group 1 counter identified by the given `idx` with `val`. */
-void amu_group1_cnt_write(int idx, uint64_t val)
+/* Write the group 1 counter identified by the given `idx` with `val` */
+void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+        assert(amu_group1_supported());
+        assert(idx < AMU_GROUP1_NR_COUNTERS);
 
         amu_group1_cnt_write_internal(idx, val);
         isb();
@@ -106,78 +142,106 @@ void amu_group1_cnt_write(int idx, uint64_t val)
 
 /*
  * Program the event type register for the given `idx` with
- * the event number `val`.
+ * the event number `val`
  */
-void amu_group1_set_evtype(int idx, unsigned int val)
+void amu_group1_set_evtype(unsigned int idx, unsigned int val)
 {
         assert(amu_supported());
-        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
+        assert(amu_group1_supported());
+        assert(idx < AMU_GROUP1_NR_COUNTERS);
 
         amu_group1_set_evtype_internal(idx, val);
         isb();
 }
+#endif  /* AMU_GROUP1_NR_COUNTERS */
 static void *amu_context_save(const void *arg)
 {
         struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
-        int i;
+        unsigned int i;
 
-        if (!amu_supported())
+        if (!amu_supported()) {
                 return (void *)-1;
+        }
 
+#if AMU_GROUP1_NR_COUNTERS
+        if (!amu_group1_supported()) {
+                return (void *)-1;
+        }
+#endif
         /* Assert that group 0/1 counter configuration is what we expect */
-        assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
-               (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));
-        assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
-                <= AMU_GROUP1_NR_COUNTERS);
+        assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
+        assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+#endif
         /*
          * Disable group 0/1 counters to avoid other observers like SCP sampling
          * counter values from the future via the memory mapped view.
         */
         write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
         write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
         isb();
 
-        /* Save group 0 counters */
-        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+        /* Save all group 0 counters */
+        for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
                 ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+        }
 
+#if AMU_GROUP1_NR_COUNTERS
         /* Save group 1 counters */
-        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-                ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+        for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+                        ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+                }
+        }
+#endif
         return (void *)0;
 }
 static void *amu_context_restore(const void *arg)
 {
         struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
-        int i;
+        unsigned int i;
 
-        if (!amu_supported())
+        if (!amu_supported()) {
                 return (void *)-1;
+        }
 
+#if AMU_GROUP1_NR_COUNTERS
+        if (!amu_group1_supported()) {
+                return (void *)-1;
+        }
+#endif
         /* Counters were disabled in `amu_context_save()` */
-        assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));
-        assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
-                <= AMU_GROUP1_NR_COUNTERS);
+        assert(read_amcntenset0_el0() == 0U);
+
+#if AMU_GROUP1_NR_COUNTERS
+        assert(read_amcntenset1_el0() == 0U);
+#endif
 
-        /* Restore group 0 counters */
-        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
-                if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
-                        amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+        /* Restore all group 0 counters */
+        for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
+                amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+        }
 
+        /* Restore group 0 counter configuration */
+        write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+
+#if AMU_GROUP1_NR_COUNTERS
         /* Restore group 1 counters */
-        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
+        for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
                         amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+                }
+        }
 
-        /* Restore group 0/1 counter configuration */
-        write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+        /* Restore group 1 counter configuration */
         write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+#endif
 
         return (void *)0;
 }
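The save and restore handlers above are not called directly; they are hooked
into the CPU suspend path through TF-A's pubsub mechanism. A sketch of how
such handlers are typically registered in TF-A (not part of this diff; the
event names are taken from pubsub_events.h):

        SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
        SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);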