Commit f3ccf036 authored by Alexei Fedorov

TF-A AMU extension: fix detection of group 1 counters.



This patch fixes a bug where the AMUv1 group 1 counters were always
assumed to be implemented, without checking for their presence, which
otherwise caused an exception.
The AMU extension code was also modified as listed below:
- Added detection of AMUv1 for ARMv8.6
- The 'PLAT_AMU_GROUP1_NR_COUNTERS' build option is removed and the
number of group 1 counters 'AMU_GROUP1_NR_COUNTERS' is now calculated
from the 'AMU_GROUP1_COUNTERS_MASK' value (see the sketch after this list)
- Added bit field definitions and access functions for the
AMCFGR_EL0/AMCFGR and AMCGCR_EL0/AMCGCR registers
- Unified the AArch64 and AArch32 amu.c source files
- Bug fixes and TF-A coding style compliance changes.
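For illustration, the derivation amounts to taking the position of the highest set bit in 'AMU_GROUP1_COUNTERS_MASK' and adding one. The patch implements this with a preprocessor #elif chain (shown further below); a hypothetical run-time equivalent in C, not part of this patch, would be:

/*
 * Illustrative sketch only, not part of this patch: derive the number of
 * group 1 counters from a 16-bit counters mask.
 */
static unsigned int group1_nr_counters(unsigned int mask)
{
	/* Position of the highest set bit, plus one; an empty mask means 0 */
	return (mask == 0U) ? 0U : (32U - (unsigned int)__builtin_clz(mask));
}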

Change-Id: I14e407be62c3026ebc674ec7045e240ccb71e1fb
Signed-off-by: Alexei Fedorov <Alexei.Fedorov@arm.com>
parent 8ae3a91c
......@@ -562,21 +562,14 @@ behaviour of the ``assert()`` function (for example, to save memory).
doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
defined, it defaults to ``LOG_LEVEL``.
If the platform port uses the Activity Monitor Unit, the following constants
If the platform port uses the Activity Monitor Unit, the following constant
may be defined:
- **PLAT_AMU_GROUP1_COUNTERS_MASK**
This mask reflects the set of group 1 counters that should be enabled. The
maximum number of group 1 counters supported by AMUv1 is 16 so the mask
can be at most 0xffff. If the platform does not define this mask, no group 1
counters are enabled. If the platform defines this mask, the following
constant needs to also be defined.
- **PLAT_AMU_GROUP1_NR_COUNTERS**
This value is used to allocate an array to save and restore the counters
specified by ``PLAT_AMU_GROUP1_COUNTERS_MASK`` on CPU suspend.
This value should be equal to the highest bit position set in the
mask, plus 1. The maximum number of group 1 counters in AMUv1 is 16.
counters are enabled.
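For illustration, a platform port might define the mask in its platform_def.h as follows (a hypothetical value enabling the first three group 1 counters):

#define PLAT_AMU_GROUP1_COUNTERS_MASK	U(0x7)

With this value, the derivation introduced by this patch yields AMU_GROUP1_NR_COUNTERS == 3, so group 1 counters 0 to 2 are enabled and saved/restored across CPU suspend.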
File : plat_macros.S [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
......
......@@ -701,6 +701,16 @@
#define AMEVTYPER1E p15, 0, c13, c15, 6
#define AMEVTYPER1F p15, 0, c13, c15, 7
/* AMCFGR definitions */
#define AMCFGR_NCG_SHIFT U(28)
#define AMCFGR_NCG_MASK U(0xf)
#define AMCFGR_N_SHIFT U(0)
#define AMCFGR_N_MASK U(0xff)
/* AMCGCR definitions */
#define AMCGCR_CG1NC_SHIFT U(8)
#define AMCGCR_CG1NC_MASK U(0xff)
/*******************************************************************************
* Definitions for DynamIQ Shared Unit registers
******************************************************************************/
......
......@@ -300,11 +300,16 @@ DEFINE_COPROCR_RW_FUNCS(prrr, PRRR)
DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR)
DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
/* Coproc registers for 32bit AMU support */
DEFINE_COPROCR_READ_FUNC(amcfgr, AMCFGR)
DEFINE_COPROCR_READ_FUNC(amcgcr, AMCGCR)
DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
/* Coproc registers for 64bit AMU support */
DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
......
......@@ -898,9 +898,14 @@
#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
/* AMCFGR_EL0 definitions */
#define AMCFGR_EL0_NCG_SHIFT U(28)
#define AMCFGR_EL0_NCG_MASK U(0xf)
#define AMCFGR_EL0_N_SHIFT U(0)
#define AMCFGR_EL0_N_MASK U(0xff)
/* AMCGCR_EL0 definitions */
#define AMCGCR_EL0_CG1NC_SHIFT U(8)
#define AMCGCR_EL0_CG1NC_LENGTH U(8)
#define AMCGCR_EL0_CG1NC_MASK U(0xff)
/* MPAM register definitions */
......
......@@ -482,7 +482,8 @@ DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
DEFINE_RENAME_SYSREG_READ_FUNC(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
......
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -10,13 +10,14 @@
#include <stdbool.h>
#include <stdint.h>
#include <platform_def.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <platform_def.h>
/* All group 0 counters */
#define AMU_GROUP0_COUNTERS_MASK U(0xf)
#define AMU_GROUP0_NR_COUNTERS U(4)
#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
......@@ -24,25 +25,67 @@
#define AMU_GROUP1_COUNTERS_MASK U(0)
#endif
#ifdef PLAT_AMU_GROUP1_NR_COUNTERS
#define AMU_GROUP1_NR_COUNTERS PLAT_AMU_GROUP1_NR_COUNTERS
/* Calculate number of group 1 counters */
#if (AMU_GROUP1_COUNTERS_MASK & (1 << 15))
#define AMU_GROUP1_NR_COUNTERS 16U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 14))
#define AMU_GROUP1_NR_COUNTERS 15U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 13))
#define AMU_GROUP1_NR_COUNTERS 14U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 12))
#define AMU_GROUP1_NR_COUNTERS 13U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 11))
#define AMU_GROUP1_NR_COUNTERS 12U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 10))
#define AMU_GROUP1_NR_COUNTERS 11U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 9))
#define AMU_GROUP1_NR_COUNTERS 10U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 8))
#define AMU_GROUP1_NR_COUNTERS 9U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 7))
#define AMU_GROUP1_NR_COUNTERS 8U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 6))
#define AMU_GROUP1_NR_COUNTERS 7U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 5))
#define AMU_GROUP1_NR_COUNTERS 6U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 4))
#define AMU_GROUP1_NR_COUNTERS 5U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 3))
#define AMU_GROUP1_NR_COUNTERS 4U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 2))
#define AMU_GROUP1_NR_COUNTERS 3U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 1))
#define AMU_GROUP1_NR_COUNTERS 2U
#elif (AMU_GROUP1_COUNTERS_MASK & (1 << 0))
#define AMU_GROUP1_NR_COUNTERS 1U
#else
#define AMU_GROUP1_NR_COUNTERS U(0)
#define AMU_GROUP1_NR_COUNTERS 0U
#endif
CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
CASSERT(AMU_GROUP1_NR_COUNTERS <= 16, invalid_amu_group1_nr_counters);
struct amu_ctx {
uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
#if AMU_GROUP1_NR_COUNTERS
uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
#endif
};
bool amu_supported(void);
void amu_enable(bool el2_unused);
/* Group 0 configuration helpers */
uint64_t amu_group0_cnt_read(int idx);
void amu_group0_cnt_write(int idx, uint64_t val);
uint64_t amu_group0_cnt_read(unsigned int idx);
void amu_group0_cnt_write(unsigned int idx, uint64_t val);
#if AMU_GROUP1_NR_COUNTERS
bool amu_group1_supported(void);
/* Group 1 configuration helpers */
uint64_t amu_group1_cnt_read(int idx);
void amu_group1_cnt_write(int idx, uint64_t val);
void amu_group1_set_evtype(int idx, unsigned int val);
uint64_t amu_group1_cnt_read(unsigned int idx);
void amu_group1_cnt_write(unsigned int idx, uint64_t val);
void amu_group1_set_evtype(unsigned int idx, unsigned int val);
#endif
#endif /* AMU_H */
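For illustration only, a hypothetical EL3 caller might use this interface as follows; the function name and the event parameter are assumptions and not part of the patch, and the sketch assumes the platform mask enables group 1 counter 0 (so AMU_GROUP1_NR_COUNTERS > 0):

#include <lib/extensions/amu.h>

/* Illustrative usage sketch, not part of this patch */
static uint64_t sample_group1_counter0(unsigned int event)
{
	/* Program a placeholder event on group 1 counter 0, then read it */
	amu_group1_set_evtype(0U, event);
	return amu_group1_cnt_read(0U);
}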
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -9,11 +9,11 @@
#include <stdint.h>
uint64_t amu_group0_cnt_read_internal(int idx);
void amu_group0_cnt_write_internal(int idx, uint64_t val);
uint64_t amu_group0_cnt_read_internal(unsigned int idx);
void amu_group0_cnt_write_internal(unsigned int idx, uint64_t val);
uint64_t amu_group1_cnt_read_internal(int idx);
void amu_group1_cnt_write_internal(int idx, uint64_t val);
void amu_group1_set_evtype_internal(int idx, unsigned int val);
uint64_t amu_group1_cnt_read_internal(unsigned int idx);
void amu_group1_cnt_write_internal(unsigned int idx, uint64_t val);
void amu_group1_set_evtype_internal(unsigned int idx, unsigned int val);
#endif /* AMU_PRIVATE_H */
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <stdbool.h>
#include <arch.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
#include <plat/common/platform.h>
#define AMU_GROUP0_NR_COUNTERS 4
struct amu_ctx {
uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};
#include <plat/common/platform.h>
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
{
uint64_t features;
uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
return (features & ID_PFR0_AMU_MASK) == 1U;
features &= ID_PFR0_AMU_MASK;
return ((features == 1U) || (features == 2U));
}
#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
return (features & AMCFGR_NCG_MASK) == 1U;
}
#endif
/*
* Enable counters. This function is meant to be invoked
* by the context management library before exiting from EL3.
*/
void amu_enable(bool el2_unused)
{
if (!amu_supported())
if (!amu_supported()) {
INFO("AMU is not implemented\n");
return;
}
#if AMU_GROUP1_NR_COUNTERS
/* Check and set presence of group 1 counters */
if (!amu_group1_supported()) {
ERROR("AMU Counter Group 1 is not implemented\n");
panic();
}
/* Check number of group 1 counters */
uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
AMCGCR_CG1NC_MASK;
VERBOSE("%s%u. %s%u\n",
"Number of AMU Group 1 Counters ", cnt_num,
"Requested number ", AMU_GROUP1_NR_COUNTERS);
if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
ERROR("%s%u is less than %s%u\n",
"Number of AMU Group 1 Counters ", cnt_num,
"Requested number ", AMU_GROUP1_NR_COUNTERS);
panic();
}
#endif
if (el2_unused) {
uint64_t v;
......@@ -49,112 +84,156 @@ void amu_enable(bool el2_unused)
/* Enable group 0 counters */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif
}
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
}
/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
}
/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
isb();
}
void amu_group1_set_evtype(int idx, unsigned int val)
/*
* Program the event type register for the given `idx` with
* the event number `val`
*/
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */
static void *amu_context_save(const void *arg)
{
struct amu_ctx *ctx;
int i;
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (!amu_supported())
if (!amu_supported()) {
return (void *)-1;
}
ctx = &amu_ctxs[plat_my_core_pos()];
/* Assert that group 0 counter configuration is what we expect */
assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
if (!amu_group1_supported()) {
return (void *)-1;
}
#endif
/* Assert that group 0/1 counter configuration is what we expect */
assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
/*
* Disable group 0 counters to avoid other observers like SCP sampling
* Disable group 0/1 counters to avoid other observers like SCP sampling
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
#endif
isb();
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
/* Save all group 0 counters */
for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
}
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
ctx->group1_cnts[i] = amu_group1_cnt_read(i);
#if AMU_GROUP1_NR_COUNTERS
/* Save group 1 counters */
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
ctx->group1_cnts[i] = amu_group1_cnt_read(i);
}
}
#endif
return (void *)0;
}
static void *amu_context_restore(const void *arg)
{
struct amu_ctx *ctx;
int i;
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (!amu_supported())
if (!amu_supported()) {
return (void *)-1;
}
ctx = &amu_ctxs[plat_my_core_pos()];
#if AMU_GROUP1_NR_COUNTERS
if (!amu_group1_supported()) {
return (void *)-1;
}
#endif
/* Counters were disabled in `amu_context_save()` */
assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));
assert(read_amcntenset0_el0() == 0U);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == 0U);
#endif
/* Restore group 0 counters */
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
/* Restore all group 0 counters */
for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
}
/* Enable group 0 counters */
/* Restore group 0 counter configuration */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
/* Enable group 1 counters */
#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
}
}
/* Restore group 1 counter configuration */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif
return (void *)0;
}
......
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -9,38 +9,68 @@
#include <arch.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
#include <plat/common/platform.h>
#define AMU_GROUP0_NR_COUNTERS 4
struct amu_ctx {
uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};
#include <plat/common/platform.h>
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
{
uint64_t features;
uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
features &= ID_AA64PFR0_AMU_MASK;
return ((features == 1U) || (features == 2U));
}
#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
return (features & ID_AA64PFR0_AMU_MASK) == 1U;
return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
#endif
/*
* Enable counters. This function is meant to be invoked
* Enable counters. This function is meant to be invoked
* by the context management library before exiting from EL3.
*/
void amu_enable(bool el2_unused)
{
uint64_t v;
if (!amu_supported())
if (!amu_supported()) {
INFO("AMU is not implemented\n");
return;
}
#if AMU_GROUP1_NR_COUNTERS
/* Check and set presence of group 1 counters */
if (!amu_group1_supported()) {
ERROR("AMU Counter Group 1 is not implemented\n");
panic();
}
/* Check number of group 1 counters */
uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
AMCGCR_EL0_CG1NC_MASK;
VERBOSE("%s%llu. %s%u\n",
"Number of AMU Group 1 Counters ", cnt_num,
"Requested number ", AMU_GROUP1_NR_COUNTERS);
if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
ERROR("%s%llu is less than %s%u\n",
"Number of AMU Group 1 Counters ", cnt_num,
"Requested number ", AMU_GROUP1_NR_COUNTERS);
panic();
}
#endif
if (el2_unused) {
/*
......@@ -62,43 +92,49 @@ void amu_enable(bool el2_unused)
/* Enable group 0 counters */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
}
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
}
/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));
assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
isb();
}
/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
return amu_group1_cnt_read_internal(idx);
}
/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_cnt_write_internal(idx, val);
isb();
......@@ -106,78 +142,106 @@ void amu_group1_cnt_write(int idx, uint64_t val)
/*
* Program the event type register for the given `idx` with
* the event number `val`.
* the event number `val`
*/
void amu_group1_set_evtype(int idx, unsigned int val)
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
assert(amu_supported());
assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
amu_group1_set_evtype_internal(idx, val);
isb();
}
#endif /* AMU_GROUP1_NR_COUNTERS */
static void *amu_context_save(const void *arg)
{
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
int i;
unsigned int i;
if (!amu_supported())
if (!amu_supported()) {
return (void *)-1;
}
#if AMU_GROUP1_NR_COUNTERS
if (!amu_group1_supported()) {
return (void *)-1;
}
#endif
/* Assert that group 0/1 counter configuration is what we expect */
assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));
assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
<= AMU_GROUP1_NR_COUNTERS);
assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
/*
* Disable group 0/1 counters to avoid other observers like SCP sampling
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
isb();
/* Save group 0 counters */
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
/* Save all group 0 counters */
for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
}
#if AMU_GROUP1_NR_COUNTERS
/* Save group 1 counters */
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
ctx->group1_cnts[i] = amu_group1_cnt_read(i);
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
ctx->group1_cnts[i] = amu_group1_cnt_read(i);
}
}
#endif
return (void *)0;
}
static void *amu_context_restore(const void *arg)
{
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
int i;
unsigned int i;
if (!amu_supported())
if (!amu_supported()) {
return (void *)-1;
}
#if AMU_GROUP1_NR_COUNTERS
if (!amu_group1_supported()) {
return (void *)-1;
}
#endif
/* Counters were disabled in `amu_context_save()` */
assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));
assert(read_amcntenset0_el0() == 0U);
assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
<= AMU_GROUP1_NR_COUNTERS);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == 0U);
#endif
/* Restore group 0 counters */
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
/* Restore all group 0 counters */
for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
}
/* Restore group 0 counter configuration */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
}
}
/* Restore group 0/1 counter configuration */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
/* Restore group 1 counter configuration */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
return (void *)0;
}
......