Arm Trusted Firmware

Commit ef4c1e19
Authored Mar 02, 2021 by Manish Pandey
Committed by TrustedFirmware Code Review, Mar 02, 2021

Merge "Enable v8.6 AMU enhancements (FEAT_AMUv1p1)" into integration

Parents: 174551d5, 873d4241
Changes: 15 files
Makefile

@@ -895,6 +895,7 @@ $(eval $(call assert_booleans,\
 	DYN_DISABLE_AUTH \
 	EL3_EXCEPTION_HANDLING \
 	ENABLE_AMU \
+	AMU_RESTRICT_COUNTERS \
 	ENABLE_ASSERTIONS \
 	ENABLE_MPAM_FOR_LOWER_ELS \
 	ENABLE_PIE \

@@ -984,6 +985,7 @@ $(eval $(call add_defines,\
 	DECRYPTION_SUPPORT_${DECRYPTION_SUPPORT} \
 	DISABLE_MTPMU \
 	ENABLE_AMU \
+	AMU_RESTRICT_COUNTERS \
 	ENABLE_ASSERTIONS \
 	ENABLE_BTI \
 	ENABLE_MPAM_FOR_LOWER_ELS \
docs/getting_started/build-options.rst

@@ -22,6 +22,10 @@ Common build options
   directory containing the SP source, relative to the ``bl32/``; the directory
   is expected to contain a makefile called ``<aarch32_sp-value>.mk``.

+- ``AMU_RESTRICT_COUNTERS``: Register reads to the group 1 counters will return
+  zero at all but the highest implemented exception level. Reads from the
+  memory mapped view are unaffected by this control.
+
 - ``ARCH`` : Choose the target build architecture for TF-A. It can take either
   ``aarch64`` or ``aarch32`` as values. By default, it is defined to
   ``aarch64``.
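As a usage illustration for the new option (not part of the patch; ``fvp`` is
just a stand-in platform): building with
``make PLAT=fvp ENABLE_AMU=1 AMU_RESTRICT_COUNTERS=1 all`` would enable the AMU
and program AMCR so that group 1 system register reads from lower ELs return
zero.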
include/arch/aarch32/arch.h

 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -116,6 +116,9 @@
 #define ID_PFR0_AMU_SHIFT		U(20)
 #define ID_PFR0_AMU_LENGTH		U(4)
 #define ID_PFR0_AMU_MASK		U(0xf)
+#define ID_PFR0_AMU_NOT_SUPPORTED	U(0x0)
+#define ID_PFR0_AMU_V1			U(0x1)
+#define ID_PFR0_AMU_V1P1		U(0x2)
 #define ID_PFR0_DIT_SHIFT		U(24)
 #define ID_PFR0_DIT_LENGTH		U(4)

@@ -653,7 +656,7 @@
 #define PAR_ADDR_MASK	(BIT_64(40) - ULL(1))	/* 40-bits-wide page address */

 /*******************************************************************************
- * Definitions for system register interface to AMU for ARMv8.4 onwards
+ * Definitions for system register interface to AMU for FEAT_AMUv1
  ******************************************************************************/
 #define AMCR		p15, 0, c13, c2, 0
 #define AMCFGR		p15, 0, c13, c2, 1

@@ -712,6 +715,9 @@
 #define AMEVTYPER1E	p15, 0, c13, c15, 6
 #define AMEVTYPER1F	p15, 0, c13, c15, 7

+/* AMCR definitions */
+#define AMCR_CG1RZ_BIT	(ULL(1) << 17)
+
 /* AMCFGR definitions */
 #define AMCFGR_NCG_SHIFT	U(28)
 #define AMCFGR_NCG_MASK		U(0xf)
include/arch/aarch32/arch_helpers.h

 /*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -303,6 +303,7 @@ DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
 /* Coproc registers for 32bit AMU support */
 DEFINE_COPROCR_READ_FUNC(amcfgr, AMCFGR)
 DEFINE_COPROCR_READ_FUNC(amcgcr, AMCGCR)
+DEFINE_COPROCR_RW_FUNCS(amcr, AMCR)

 DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
 DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
include/arch/aarch64/arch.h

 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -161,6 +161,9 @@
 #define ID_AA64PFR0_EL3_SHIFT	U(12)
 #define ID_AA64PFR0_AMU_SHIFT	U(44)
 #define ID_AA64PFR0_AMU_MASK	ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED	U(0x0)
+#define ID_AA64PFR0_AMU_V1		U(0x1)
+#define ID_AA64PFR0_AMU_V1P1		U(0x2)
 #define ID_AA64PFR0_ELX_MASK	ULL(0xf)
 #define ID_AA64PFR0_GIC_SHIFT	U(24)
 #define ID_AA64PFR0_GIC_WIDTH	U(4)

@@ -406,6 +409,7 @@
 #define SCR_RES1_BITS		((U(1) << 4) | (U(1) << 5))
 #define SCR_TWEDEL_SHIFT	U(30)
 #define SCR_TWEDEL_MASK		ULL(0xf)
+#define SCR_AMVOFFEN_BIT	(UL(1) << 35)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
 #define SCR_ECVEN_BIT		(UL(1) << 28)
 #define SCR_FGTEN_BIT		(UL(1) << 27)

@@ -479,6 +483,7 @@
 #define VTTBR_BADDR_SHIFT	U(0)

 /* HCR definitions */
+#define HCR_AMVOFFEN_BIT	(ULL(1) << 51)
 #define HCR_API_BIT		(ULL(1) << 41)
 #define HCR_APK_BIT		(ULL(1) << 40)
 #define HCR_E2H_BIT		(ULL(1) << 34)

@@ -913,7 +918,7 @@
 #define MPAM3_EL3	S3_6_C10_C5_0

 /*******************************************************************************
- * Definitions for system register interface to AMU for ARMv8.4 onwards
+ * Definitions for system register interface to AMU for FEAT_AMUv1
  ******************************************************************************/
 #define AMCR_EL0	S3_3_C13_C2_0
 #define AMCFGR_EL0	S3_3_C13_C2_1

@@ -991,6 +996,50 @@
 #define MPAMIDR_HAS_HCR_BIT	(ULL(1) << 17)

+/*******************************************************************************
+ * Definitions for system register interface to AMU for FEAT_AMUv1p1
+ ******************************************************************************/
+
+/* Definition for register defining which virtual offsets are implemented. */
+#define AMCG1IDR_EL0		S3_3_C13_C2_6
+#define AMCG1IDR_CTR_MASK	ULL(0xffff)
+#define AMCG1IDR_CTR_SHIFT	U(0)
+#define AMCG1IDR_VOFF_MASK	ULL(0xffff)
+#define AMCG1IDR_VOFF_SHIFT	U(16)
+
+/* New bit added to AMCR_EL0 */
+#define AMCR_CG1RZ_BIT		(ULL(0x1) << 17)
+
+/*
+ * Definitions for virtual offset registers for architected activity monitor
+ * event counters.
+ * AMEVCNTVOFF01_EL2 intentionally left undefined, as it does not exist.
+ */
+#define AMEVCNTVOFF00_EL2	S3_4_C13_C8_0
+#define AMEVCNTVOFF02_EL2	S3_4_C13_C8_2
+#define AMEVCNTVOFF03_EL2	S3_4_C13_C8_3
+
+/*
+ * Definitions for virtual offset registers for auxiliary activity monitor
+ * event counters.
+ */
+#define AMEVCNTVOFF10_EL2	S3_4_C13_C10_0
+#define AMEVCNTVOFF11_EL2	S3_4_C13_C10_1
+#define AMEVCNTVOFF12_EL2	S3_4_C13_C10_2
+#define AMEVCNTVOFF13_EL2	S3_4_C13_C10_3
+#define AMEVCNTVOFF14_EL2	S3_4_C13_C10_4
+#define AMEVCNTVOFF15_EL2	S3_4_C13_C10_5
+#define AMEVCNTVOFF16_EL2	S3_4_C13_C10_6
+#define AMEVCNTVOFF17_EL2	S3_4_C13_C10_7
+#define AMEVCNTVOFF18_EL2	S3_4_C13_C11_0
+#define AMEVCNTVOFF19_EL2	S3_4_C13_C11_1
+#define AMEVCNTVOFF1A_EL2	S3_4_C13_C11_2
+#define AMEVCNTVOFF1B_EL2	S3_4_C13_C11_3
+#define AMEVCNTVOFF1C_EL2	S3_4_C13_C11_4
+#define AMEVCNTVOFF1D_EL2	S3_4_C13_C11_5
+#define AMEVCNTVOFF1E_EL2	S3_4_C13_C11_6
+#define AMEVCNTVOFF1F_EL2	S3_4_C13_C11_7
+
 /*******************************************************************************
  * RAS system registers
  ******************************************************************************/
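A minimal sketch of how the new AMCG1IDR_EL0 fields compose, assuming the
read_amcg1idr_el0() accessor this patch adds in arch_helpers.h; the helper
function itself is hypothetical, not part of the patch:

#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>

/* Does group 1 counter `idx` implement a virtual offset register? */
static bool group1_voffset_implemented(unsigned int idx)
{
	uint64_t voff_map = (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		AMCG1IDR_VOFF_MASK;

	return ((voff_map >> idx) & 1ULL) != 0ULL;
}

This mirrors the checks that lib/extensions/amu/aarch64/amu.c applies to the
group 1 virtual offset accessors further down in this commit.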
include/arch/aarch64/arch_features.h

 /*
- * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -82,6 +82,12 @@ static inline bool is_armv8_5_rng_present(void)
 		ID_AA64ISAR0_RNDR_MASK);
 }

+static inline bool is_armv8_6_feat_amuv1p1_present(void)
+{
+	return (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+		ID_AA64PFR0_AMU_MASK) >= ID_AA64PFR0_AMU_V1P1);
+}
+
 /*
  * Return MPAM version:
  *
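A hypothetical caller sketch for the new feature check (the function below is
illustrative only, not in-tree):

#include <stdbool.h>

#include <arch_features.h>

/*
 * The virtual offsets live in EL2 registers, so both conditions must hold
 * before any AMEVCNTVOFF*_EL2 register is touched. The `>=` comparison in
 * the helper also accepts AMU versions newer than v1p1.
 */
static bool can_use_amu_voffsets(bool el2_present)
{
	return el2_present && is_armv8_6_feat_amuv1p1_present();
}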
include/arch/aarch64/arch_helpers.h

 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -485,6 +485,8 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
 DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
 DEFINE_RENAME_SYSREG_READ_FUNC(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_READ_FUNC(amcg1idr_el0, AMCG1IDR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcr_el0, AMCR_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
include/lib/extensions/amu.h

 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -66,19 +66,31 @@ CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
 struct amu_ctx {
 	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+#if __aarch64__
+	/* Architected event counter 1 does not have an offset register. */
+	uint64_t group0_voffsets[AMU_GROUP0_NR_COUNTERS - 1];
+#endif
+
 #if AMU_GROUP1_NR_COUNTERS
 	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
+#if __aarch64__
+	uint64_t group1_voffsets[AMU_GROUP1_NR_COUNTERS];
+#endif
 #endif
 };

-bool amu_supported(void);
+unsigned int amu_get_version(void);
 void amu_enable(bool el2_unused);

 /* Group 0 configuration helpers */
 uint64_t amu_group0_cnt_read(unsigned int idx);
 void amu_group0_cnt_write(unsigned int idx, uint64_t val);

+#if __aarch64__
+uint64_t amu_group0_voffset_read(unsigned int idx);
+void amu_group0_voffset_write(unsigned int idx, uint64_t val);
+#endif
+
 #if AMU_GROUP1_NR_COUNTERS
 bool amu_group1_supported(void);

@@ -86,6 +98,12 @@ bool amu_group1_supported(void);
 uint64_t amu_group1_cnt_read(unsigned int idx);
 void amu_group1_cnt_write(unsigned int idx, uint64_t val);
 void amu_group1_set_evtype(unsigned int idx, unsigned int val);

+#if __aarch64__
+uint64_t amu_group1_voffset_read(unsigned int idx);
+void amu_group1_voffset_write(unsigned int idx, uint64_t val);
+#endif
 #endif

 #endif /* AMU_H */
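A small usage sketch against the public API above; the wrapper function is
hypothetical, while amu_enable() and amu_group0_cnt_read() are the real
declarations from this header:

#include <stdbool.h>
#include <stdint.h>

#include <lib/extensions/amu.h>

/* Enable the AMU (EL2 unused here), then sample architected counter 0. */
static uint64_t amu_sample_counter0(void)
{
	amu_enable(true);
	return amu_group0_cnt_read(0U);
}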
include/lib/extensions/amu_private.h

 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -16,4 +16,12 @@ uint64_t amu_group1_cnt_read_internal(unsigned int idx);
 void amu_group1_cnt_write_internal(unsigned int idx, uint64_t val);
 void amu_group1_set_evtype_internal(unsigned int idx, unsigned int val);

+#if __aarch64__
+uint64_t amu_group0_voffset_read_internal(unsigned int idx);
+void amu_group0_voffset_write_internal(unsigned int idx, uint64_t val);
+
+uint64_t amu_group1_voffset_read_internal(unsigned int idx);
+void amu_group1_voffset_write_internal(unsigned int idx, uint64_t val);
+#endif
+
 #endif /* AMU_PRIVATE_H */
lib/el3_runtime/aarch64/context_mgmt.c

 /*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -216,6 +216,16 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 		scr_el3 |= SCR_EEL2_BIT;
 	}

+	/*
+	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
+	 * and EL2. When this bit is clear, accesses from EL2 are trapped, so
+	 * we set it to 1 when EL2 is present.
+	 */
+	if (is_armv8_6_feat_amuv1p1_present() &&
+		(el_implemented(2) != EL_IMPL_NONE)) {
+		scr_el3 |= SCR_AMVOFFEN_BIT;
+	}
+
 	/*
 	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
 	 * execution state, setting all fields rather than relying on the hw.
lib/extensions/amu/aarch32/amu.c

 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -18,13 +18,17 @@
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

-/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
-bool amu_supported(void)
+/*
+ * Get AMU version value from pfr0.
+ * Return values
+ *   ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
+ *   ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
+ *   ID_PFR0_AMU_NOT_SUPPORTED: not supported
+ */
+unsigned int amu_get_version(void)
 {
-	uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-
-	features &= ID_PFR0_AMU_MASK;
-	return ((features == 1U) || (features == 2U));
+	return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
+		ID_PFR0_AMU_MASK;
 }

 #if AMU_GROUP1_NR_COUNTERS

@@ -43,7 +47,7 @@ bool amu_group1_supported(void)
  */
 void amu_enable(bool el2_unused)
 {
-	if (!amu_supported()) {
+	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
 		return;
 	}

@@ -87,12 +91,31 @@ void amu_enable(bool el2_unused)
 	/* Enable group 1 counters */
 	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
 #endif
+
+	/* Initialize FEAT_AMUv1p1 features if present. */
+	if (amu_get_version() < ID_PFR0_AMU_V1P1) {
+		return;
+	}
+
+#if AMU_RESTRICT_COUNTERS
+	/*
+	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
+	 * counters at all but the highest implemented EL. This is controlled
+	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
+	 * register reads at lower ELs return zero. Reads from the memory
+	 * mapped view are unaffected.
+	 */
+	VERBOSE("AMU group 1 counter access restricted.\n");
+	write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
+#else
+	write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
+#endif
 }

 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);

 	return amu_group0_cnt_read_internal(idx);

@@ -101,7 +124,7 @@ uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);

 	amu_group0_cnt_write_internal(idx, val);

@@ -112,7 +135,7 @@ void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx` */
 uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);

@@ -122,7 +145,7 @@ uint64_t amu_group1_cnt_read(unsigned int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);

@@ -136,7 +159,7 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val)
  */
 void amu_group1_set_evtype(unsigned int idx, unsigned int val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);

@@ -150,7 +173,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;

-	if (!amu_supported()) {
+	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}

@@ -197,7 +220,7 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;

-	if (!amu_supported()) {
+	if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}
lib/extensions/amu/aarch32/amu_helpers.S

 /*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
lib/extensions/amu/aarch64/amu.c

 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -8,6 +8,7 @@
 #include <stdbool.h>

 #include <arch.h>
+#include <arch_features.h>
 #include <arch_helpers.h>

 #include <lib/el3_runtime/pubsub_events.h>
@@ -18,13 +19,17 @@
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

-/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
-bool amu_supported(void)
+/*
+ * Get AMU version value from aa64pfr0.
+ * Return values
+ *   ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
+ *   ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
+ *   ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
+ */
+unsigned int amu_get_version(void)
 {
-	uint64_t features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
-
-	features &= ID_AA64PFR0_AMU_MASK;
-	return ((features == 1U) || (features == 2U));
+	return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
+		ID_AA64PFR0_AMU_MASK;
 }

 #if AMU_GROUP1_NR_COUNTERS
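Since amu_get_version() now returns the raw ID field rather than a boolean,
callers compare against the named values. An illustrative (not in-tree)
helper:

#include <stdbool.h>

#include <arch.h>
#include <lib/extensions/amu.h>

/* Sketch: true when the v1p1 virtual offset registers exist. */
static bool amu_has_voffsets(void)
{
	switch (amu_get_version()) {
	case ID_AA64PFR0_AMU_NOT_SUPPORTED:
	case ID_AA64PFR0_AMU_V1:
		return false;
	default:
		/* ID_AA64PFR0_AMU_V1P1 or any newer version */
		return true;
	}
}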
@@ -44,8 +49,9 @@ bool amu_group1_supported(void)
 void amu_enable(bool el2_unused)
 {
 	uint64_t v;
+	unsigned int amu_version = amu_get_version();

-	if (!amu_supported()) {
+	if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
 		return;
 	}

@@ -96,12 +102,36 @@ void amu_enable(bool el2_unused)
 	/* Enable group 1 counters */
 	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
 #endif
+
+	/* Initialize FEAT_AMUv1p1 features if present. */
+	if (amu_version < ID_AA64PFR0_AMU_V1P1) {
+		return;
+	}
+
+	if (el2_unused) {
+		/* Make sure virtual offsets are disabled if EL2 not used. */
+		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
+	}
+
+#if AMU_RESTRICT_COUNTERS
+	/*
+	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
+	 * counters at all but the highest implemented EL. This is controlled
+	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
+	 * register reads at lower ELs return zero. Reads from the memory
+	 * mapped view are unaffected.
+	 */
+	VERBOSE("AMU group 1 counter access restricted.\n");
+	write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
+#else
+	write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
+#endif
 }

 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);

 	return amu_group0_cnt_read_internal(idx);

@@ -110,18 +140,49 @@ uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(idx < AMU_GROUP0_NR_COUNTERS);

 	amu_group0_cnt_write_internal(idx, val);
 	isb();
 }

+/*
+ * Read the group 0 offset register for a given index. Index must be 0, 2,
+ * or 3; the register for 1 does not exist.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group0_voffset_read(unsigned int idx)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx != 1U);
+
+	return amu_group0_voffset_read_internal(idx);
+}
+
+/*
+ * Write the group 0 offset register for a given index. Index must be 0, 2,
+ * or 3; the register for 1 does not exist.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+void amu_group0_voffset_write(unsigned int idx, uint64_t val)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(idx < AMU_GROUP0_NR_COUNTERS);
+	assert(idx != 1U);
+
+	amu_group0_voffset_write_internal(idx, val);
+	isb();
+}
+
 #if AMU_GROUP1_NR_COUNTERS
 /* Read the group 1 counter identified by the given `idx` */
 uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);

@@ -131,7 +192,7 @@ uint64_t amu_group1_cnt_read(unsigned int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);

@@ -139,13 +200,46 @@ void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 	isb();
 }

+/*
+ * Read the group 1 offset register for a given index.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+uint64_t amu_group1_voffset_read(unsigned int idx)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_group1_supported());
+	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
+		(1ULL << idx)) != 0ULL);
+
+	return amu_group1_voffset_read_internal(idx);
+}
+
+/*
+ * Write the group 1 offset register for a given index.
+ *
+ * Using this function requires FEAT_AMUv1p1 support.
+ */
+void amu_group1_voffset_write(unsigned int idx, uint64_t val)
+{
+	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
+	assert(amu_group1_supported());
+	assert(idx < AMU_GROUP1_NR_COUNTERS);
+	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
+		(1ULL << idx)) != 0ULL);
+
+	amu_group1_voffset_write_internal(idx, val);
+	isb();
+}
+
 /*
  * Program the event type register for the given `idx` with
  * the event number `val`
  */
 void amu_group1_set_evtype(unsigned int idx, unsigned int val)
 {
-	assert(amu_supported());
+	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
 	assert(amu_group1_supported());
 	assert(idx < AMU_GROUP1_NR_COUNTERS);

@@ -159,7 +253,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;

-	if (!amu_supported()) {
+	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}

@@ -190,13 +284,37 @@ static void *amu_context_save(const void *arg)
 		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
 	}

+	/* Save group 0 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		/* Not using a loop because count is fixed and index 1 DNE. */
+		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
+		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
+		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
+	}
+
 #if AMU_GROUP1_NR_COUNTERS
 	/* Save group 1 counters */
 	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
 			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
 		}
 	}

+	/* Save group 1 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		u_register_t amcg1idr = read_amcg1idr_el0() >>
+			AMCG1IDR_VOFF_SHIFT;
+		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
+
+		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
+				ctx->group1_voffsets[i] =
+					amu_group1_voffset_read(i);
+			}
+		}
+	}
 #endif
+
 	return (void *)0;
 }

@@ -206,7 +324,7 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	unsigned int i;

-	if (!amu_supported()) {
+	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
 		return (void *)-1;
 	}

@@ -227,17 +345,41 @@ static void *amu_context_restore(const void *arg)
 		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
 	}

+	/* Restore group 0 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		/* Not using a loop because count is fixed and index 1 DNE. */
+		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
+		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
+		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
+	}
+
 	/* Restore group 0 counter configuration */
 	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

 #if AMU_GROUP1_NR_COUNTERS
 	/* Restore group 1 counters */
 	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
-		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
+		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
 			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
 		}
 	}

+	/* Restore group 1 virtual offsets if supported and enabled. */
+	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
+			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
+		u_register_t amcg1idr = read_amcg1idr_el0() >>
+			AMCG1IDR_VOFF_SHIFT;
+		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
+
+		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
+			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
+				amu_group1_voffset_write(i,
+					ctx->group1_voffsets[i]);
+			}
+		}
+	}
+
 	/* Restore group 1 counter configuration */
 	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
 #endif
lib/extensions/amu/aarch64/amu_helpers.S

 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,6 +14,12 @@
 	.globl	amu_group1_cnt_write_internal
 	.globl	amu_group1_set_evtype_internal

+	/* FEAT_AMUv1p1 virtualisation offset register functions */
+	.globl	amu_group0_voffset_read_internal
+	.globl	amu_group0_voffset_write_internal
+	.globl	amu_group1_voffset_read_internal
+	.globl	amu_group1_voffset_write_internal
+
 /*
  * uint64_t amu_group0_cnt_read_internal(int idx);
  *
@@ -211,3 +217,169 @@ func amu_group1_set_evtype_internal
 	write	AMEVTYPER1E_EL0		/* index 14 */
 	write	AMEVTYPER1F_EL0		/* index 15 */
 endfunc amu_group1_set_evtype_internal
+
+/*
+ * Accessor functions for virtual offset registers added with FEAT_AMUv1p1
+ */
+
+/*
+ * uint64_t amu_group0_voffset_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU virtual offset register
+ * and return it in `x0`.
+ */
+func amu_group0_voffset_read_internal
+	adr	x1, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~3
+	ASM_ASSERT(eq)
+	/* Make sure idx != 1 since AMEVCNTVOFF01_EL2 does not exist */
+	cmp	x0, #1
+	ASM_ASSERT(ne)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x1, x1, x0, lsl #3	/* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x1, x1, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x1
+
+1:	read	AMEVCNTVOFF00_EL2	/* index 0 */
+	.skip	8			/* AMEVCNTVOFF01_EL2 does not exist */
+#if ENABLE_BTI
+	.skip	4
+#endif
+	read	AMEVCNTVOFF02_EL2	/* index 2 */
+	read	AMEVCNTVOFF03_EL2	/* index 3 */
+endfunc amu_group0_voffset_read_internal
+
+/*
+ * void amu_group0_voffset_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU virtual offset register.
+ */
+func amu_group0_voffset_write_internal
+	adr	x2, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~3
+	ASM_ASSERT(eq)
+	/* Make sure idx != 1 since AMEVCNTVOFF01_EL2 does not exist */
+	cmp	x0, #1
+	ASM_ASSERT(ne)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x2, x2, x0, lsl #3	/* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x2, x2, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x2
+
+1:	write	AMEVCNTVOFF00_EL2	/* index 0 */
+	.skip	8			/* AMEVCNTVOFF01_EL2 does not exist */
+#if ENABLE_BTI
+	.skip	4
+#endif
+	write	AMEVCNTVOFF02_EL2	/* index 2 */
+	write	AMEVCNTVOFF03_EL2	/* index 3 */
+endfunc amu_group0_voffset_write_internal
+
+/*
+ * uint64_t amu_group1_voffset_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU virtual offset register
+ * and return it in `x0`.
+ */
+func amu_group1_voffset_read_internal
+	adr	x1, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x1, x1, x0, lsl #3	/* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x1, x1, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x1
+
+1:	read	AMEVCNTVOFF10_EL2	/* index 0 */
+	read	AMEVCNTVOFF11_EL2	/* index 1 */
+	read	AMEVCNTVOFF12_EL2	/* index 2 */
+	read	AMEVCNTVOFF13_EL2	/* index 3 */
+	read	AMEVCNTVOFF14_EL2	/* index 4 */
+	read	AMEVCNTVOFF15_EL2	/* index 5 */
+	read	AMEVCNTVOFF16_EL2	/* index 6 */
+	read	AMEVCNTVOFF17_EL2	/* index 7 */
+	read	AMEVCNTVOFF18_EL2	/* index 8 */
+	read	AMEVCNTVOFF19_EL2	/* index 9 */
+	read	AMEVCNTVOFF1A_EL2	/* index 10 */
+	read	AMEVCNTVOFF1B_EL2	/* index 11 */
+	read	AMEVCNTVOFF1C_EL2	/* index 12 */
+	read	AMEVCNTVOFF1D_EL2	/* index 13 */
+	read	AMEVCNTVOFF1E_EL2	/* index 14 */
+	read	AMEVCNTVOFF1F_EL2	/* index 15 */
+endfunc amu_group1_voffset_read_internal
+
+/*
+ * void amu_group1_voffset_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU virtual offset register.
+ */
+func amu_group1_voffset_write_internal
+	adr	x2, 1f
+#if ENABLE_ASSERTIONS
+	/*
+	 * It can be dangerous to call this function with an
+	 * out of bounds index.  Ensure `idx` is valid.
+	 */
+	tst	x0, #~0xF
+	ASM_ASSERT(eq)
+#endif
+	/*
+	 * Given `idx` calculate address of mrs/ret instruction pair
+	 * in the table below.
+	 */
+	add	x2, x2, x0, lsl #3	/* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+	add	x2, x2, x0, lsl #2	/* + "bti j" instruction */
+#endif
+	br	x2
+
+1:	write	AMEVCNTVOFF10_EL2	/* index 0 */
+	write	AMEVCNTVOFF11_EL2	/* index 1 */
+	write	AMEVCNTVOFF12_EL2	/* index 2 */
+	write	AMEVCNTVOFF13_EL2	/* index 3 */
+	write	AMEVCNTVOFF14_EL2	/* index 4 */
+	write	AMEVCNTVOFF15_EL2	/* index 5 */
+	write	AMEVCNTVOFF16_EL2	/* index 6 */
+	write	AMEVCNTVOFF17_EL2	/* index 7 */
+	write	AMEVCNTVOFF18_EL2	/* index 8 */
+	write	AMEVCNTVOFF19_EL2	/* index 9 */
+	write	AMEVCNTVOFF1A_EL2	/* index 10 */
+	write	AMEVCNTVOFF1B_EL2	/* index 11 */
+	write	AMEVCNTVOFF1C_EL2	/* index 12 */
+	write	AMEVCNTVOFF1D_EL2	/* index 13 */
+	write	AMEVCNTVOFF1E_EL2	/* index 14 */
+	write	AMEVCNTVOFF1F_EL2	/* index 15 */
+endfunc amu_group1_voffset_write_internal
make_helpers/defaults.mk

@@ -294,6 +294,7 @@ endif
 CTX_INCLUDE_MTE_REGS	:= 0

 ENABLE_AMU		:= 0
+AMU_RESTRICT_COUNTERS	:= 0

 # By default, enable Scalable Vector Extension if implemented for Non-secure
 # lower ELs