Unverified Commit 9c00555b authored by davidcunado-arm's avatar davidcunado-arm Committed by GitHub
Browse files

Merge pull request #1253 from dp-arm/dp/amu32

AMUv1 support for AArch32
parents 956defc7 c70da546
......@@ -23,7 +23,8 @@ BL32_SOURCES += lib/pmf/pmf_main.c
endif
ifeq (${ENABLE_AMU}, 1)
BL32_SOURCES += lib/extensions/amu/aarch32/amu.c
BL32_SOURCES += lib/extensions/amu/aarch32/amu.c\
lib/extensions/amu/aarch32/amu_helpers.S
endif
ifeq (${WORKAROUND_CVE_2017_5715},1)
......
......@@ -544,7 +544,7 @@
#define AMCNTENCLR0 p15, 0, c13, c2, 4
#define AMCNTENSET0 p15, 0, c13, c2, 5
#define AMCNTENCLR1 p15, 0, c13, c3, 0
#define AMCNTENSET1 p15, 0, c13, c1, 1
#define AMCNTENSET1 p15, 0, c13, c3, 1
/* Activity Monitor Group 0 Event Counter Registers */
#define AMEVCNTR00 p15, 0, c0
......@@ -558,4 +558,40 @@
#define AMEVTYPER02 p15, 0, c13, c6, 2
#define AMEVTYPER03 p15, 0, c13, c6, 3
/*
 * Activity Monitor Group 1 Event Counter Registers.
 *
 * These encodings use the three-field (coproc, opc1, CRm) form, i.e. the
 * 64-bit coprocessor transfer encoding (MRRC/MCRR) — presumably consumed
 * by a 16-register-pair accessor macro such as ldcopr16/stcopr16; confirm
 * against the sysreg access helpers.
 */
#define AMEVCNTR10 p15, 0, c4
#define AMEVCNTR11 p15, 1, c4
#define AMEVCNTR12 p15, 2, c4
#define AMEVCNTR13 p15, 3, c4
#define AMEVCNTR14 p15, 4, c4
#define AMEVCNTR15 p15, 5, c4
#define AMEVCNTR16 p15, 6, c4
#define AMEVCNTR17 p15, 7, c4
#define AMEVCNTR18 p15, 0, c5
#define AMEVCNTR19 p15, 1, c5
#define AMEVCNTR1A p15, 2, c5
#define AMEVCNTR1B p15, 3, c5
#define AMEVCNTR1C p15, 4, c5
#define AMEVCNTR1D p15, 5, c5
#define AMEVCNTR1E p15, 6, c5
#define AMEVCNTR1F p15, 7, c5
/*
 * Activity Monitor Group 1 Event Type Registers.
 *
 * These use the five-field (coproc, opc1, CRn, CRm, opc2) form, i.e. the
 * 32-bit coprocessor transfer encoding (MRC/MCR).
 */
#define AMEVTYPER10 p15, 0, c13, c14, 0
#define AMEVTYPER11 p15, 0, c13, c14, 1
#define AMEVTYPER12 p15, 0, c13, c14, 2
#define AMEVTYPER13 p15, 0, c13, c14, 3
#define AMEVTYPER14 p15, 0, c13, c14, 4
#define AMEVTYPER15 p15, 0, c13, c14, 5
#define AMEVTYPER16 p15, 0, c13, c14, 6
#define AMEVTYPER17 p15, 0, c13, c14, 7
#define AMEVTYPER18 p15, 0, c13, c15, 0
#define AMEVTYPER19 p15, 0, c13, c15, 1
#define AMEVTYPER1A p15, 0, c13, c15, 2
#define AMEVTYPER1B p15, 0, c13, c15, 3
#define AMEVTYPER1C p15, 0, c13, c15, 4
#define AMEVTYPER1D p15, 0, c13, c15, 5
#define AMEVTYPER1E p15, 0, c13, c15, 6
#define AMEVTYPER1F p15, 0, c13, c15, 7
#endif /* __ARCH_H__ */
......@@ -7,10 +7,10 @@
#ifndef __AMU_H__
#define __AMU_H__
#include <sys/cdefs.h> /* for CASSERT() */
#include <cassert.h>
#include <platform_def.h>
#include <stdint.h>
#include <sys/cdefs.h> /* for CASSERT() */
/* All group 0 counters */
#define AMU_GROUP0_COUNTERS_MASK 0xf
......
......@@ -5,8 +5,8 @@
*/
#include <cortex_a75.h>
#include <pubsub_events.h>
#include <platform.h>
#include <pubsub_events.h>
struct amu_ctx {
uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
......
......@@ -5,6 +5,7 @@
*/
#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <platform.h>
......@@ -14,21 +15,26 @@
struct amu_ctx {
uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
void amu_enable(int el2_unused)
int amu_supported(void)
{
uint64_t features;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
if ((features & ID_PFR0_AMU_MASK) != 1)
return (features & ID_PFR0_AMU_MASK) == 1;
}
void amu_enable(int el2_unused)
{
if (!amu_supported())
return;
if (el2_unused) {
uint64_t v;
/*
* Non-secure access from EL0 or EL1 to the Activity Monitor
* registers do not trap to EL2.
......@@ -40,15 +46,64 @@ void amu_enable(int el2_unused)
/* Enable group 0 counters */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
}
/*
 * Return the current value of the group 0 AMU counter selected by `idx`.
 *
 * Preconditions (checked via assert): AMUv1 is implemented and `idx`
 * names a valid group 0 counter.
 */
uint64_t amu_group0_cnt_read(int idx)
{
	uint64_t cnt;

	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

	cnt = amu_group0_cnt_read_internal(idx);

	return cnt;
}
/*
 * Set the group 0 AMU counter selected by `idx` to `val`.
 *
 * Preconditions (checked via assert): AMUv1 is implemented and `idx`
 * names a valid group 0 counter.  The trailing ISB synchronizes the
 * system register write before returning.
 */
void amu_group0_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

	amu_group0_cnt_write_internal(idx, val);
	isb();
}
/*
 * Return the current value of the group 1 AMU counter selected by `idx`.
 *
 * Preconditions (checked via assert): AMUv1 is implemented and `idx`
 * names a valid group 1 counter.
 */
uint64_t amu_group1_cnt_read(int idx)
{
	uint64_t cnt;

	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

	cnt = amu_group1_cnt_read_internal(idx);

	return cnt;
}
/*
 * Set the group 1 AMU counter selected by `idx` to `val`.
 *
 * Preconditions (checked via assert): AMUv1 is implemented and `idx`
 * names a valid group 1 counter.  The trailing ISB synchronizes the
 * system register write before returning.
 */
void amu_group1_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

	amu_group1_cnt_write_internal(idx, val);
	isb();
}
/*
 * Program the group 1 event type register selected by `idx` with `val`,
 * choosing which event the corresponding group 1 counter monitors.
 *
 * Preconditions (checked via assert): AMUv1 is implemented and `idx`
 * names a valid group 1 counter.  `val` is written through unchanged;
 * the trailing ISB synchronizes the register write before returning.
 */
void amu_group1_set_evtype(int idx, unsigned int val)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
static void *amu_context_save(const void *arg)
{
struct amu_ctx *ctx;
uint64_t features;
int i;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
if ((features & ID_PFR0_AMU_MASK) != 1)
if (!amu_supported())
return (void *)-1;
ctx = &amu_ctxs[plat_my_core_pos()];
......@@ -61,12 +116,14 @@ static void *amu_context_save(const void *arg)
* counter values from the future via the memory mapped view.
*/
write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
isb();
ctx->group0_cnts[0] = read64_amevcntr00();
ctx->group0_cnts[1] = read64_amevcntr01();
ctx->group0_cnts[2] = read64_amevcntr02();
ctx->group0_cnts[3] = read64_amevcntr03();
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
ctx->group0_cnts[i] = amu_group0_cnt_read(i);
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
ctx->group1_cnts[i] = amu_group1_cnt_read(i);
return 0;
}
......@@ -75,6 +132,7 @@ static void *amu_context_restore(const void *arg)
{
struct amu_ctx *ctx;
uint64_t features;
int i;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
if ((features & ID_PFR0_AMU_MASK) != 1)
......@@ -86,19 +144,16 @@ static void *amu_context_restore(const void *arg)
assert(read_amcntenset0() == 0);
/* Restore group 0 counters */
if (AMU_GROUP0_COUNTERS_MASK & (1U << 0))
write64_amevcntr00(ctx->group0_cnts[0]);
if (AMU_GROUP0_COUNTERS_MASK & (1U << 1))
write64_amevcntr01(ctx->group0_cnts[1]);
if (AMU_GROUP0_COUNTERS_MASK & (1U << 2))
write64_amevcntr02(ctx->group0_cnts[2]);
if (AMU_GROUP0_COUNTERS_MASK & (1U << 3))
write64_amevcntr03(ctx->group0_cnts[3]);
isb();
for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
amu_group0_cnt_write(i, ctx->group0_cnts[i]);
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
/* Enable group 0 counters */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
return 0;
}
......
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <assert_macros.S>
#include <asm_macros.S>
.globl amu_group0_cnt_read_internal
.globl amu_group0_cnt_write_internal
.globl amu_group1_cnt_read_internal
.globl amu_group1_cnt_write_internal
.globl amu_group1_set_evtype_internal
/*
 * uint64_t amu_group0_cnt_read_internal(int idx);
 *
 * Given `idx` (0..3), read the corresponding group 0 AMU counter and
 * return the 64-bit value in `r0` (low word) / `r1` (high word), the
 * AAPCS32 location for a 64-bit return value.
 * Clobbers: r0, r1.
 */
func amu_group0_cnt_read_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 3]: any bit above bit 1 means out of range */
mov r1, r0
lsr r1, r1, #2
cmp r1, #0
ASM_ASSERT(eq)
#endif
/*
 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
 * in the table below, then jump to it (computed branch: idx * 8 bytes
 * past label 1).
 * NOTE(review): assumes every ldcopr16/bx lr entry is exactly 8 bytes,
 * i.e. this file assembles in A32 (not Thumb) state — confirm the build
 * rules.
 */
adr r1, 1f
lsl r0, r0, #3 /* each ldcopr16/bx lr sequence is 8 bytes */
add r1, r1, r0
bx r1
1:
ldcopr16 r0, r1, AMEVCNTR00 /* index 0 */
bx lr
ldcopr16 r0, r1, AMEVCNTR01 /* index 1 */
bx lr
ldcopr16 r0, r1, AMEVCNTR02 /* index 2 */
bx lr
ldcopr16 r0, r1, AMEVCNTR03 /* index 3 */
bx lr
endfunc amu_group0_cnt_read_internal
/*
 * void amu_group0_cnt_write_internal(int idx, uint64_t val);
 *
 * Given `idx` (0..3), write the 64-bit `val` (passed in r1/r2 pair per
 * AAPCS32 — presumably r0=idx, r1:r2 or aligned pair holds val; the
 * stcopr16 below consumes r0,r1, so the macro/ABI mapping should be
 * confirmed against callers) to the corresponding group 0 AMU counter.
 * Clobbers: r0, r2.
 */
func amu_group0_cnt_write_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 3]: any bit above bit 1 means out of range */
mov r2, r0
lsr r2, r2, #2
cmp r2, #0
ASM_ASSERT(eq)
#endif
/*
 * Given `idx` calculate address of stcopr16/bx lr instruction pair
 * in the table below, then jump to it (computed branch: idx * 8 bytes
 * past label 1).
 * NOTE(review): assumes every stcopr16/bx lr entry is exactly 8 bytes
 * (A32 state) — confirm the build rules.
 */
adr r2, 1f
lsl r0, r0, #3 /* each stcopr16/bx lr sequence is 8 bytes */
add r2, r2, r0
bx r2
1:
stcopr16 r0,r1, AMEVCNTR00 /* index 0 */
bx lr
stcopr16 r0,r1, AMEVCNTR01 /* index 1 */
bx lr
stcopr16 r0,r1, AMEVCNTR02 /* index 2 */
bx lr
stcopr16 r0,r1, AMEVCNTR03 /* index 3 */
bx lr
endfunc amu_group0_cnt_write_internal
/*
 * uint64_t amu_group1_cnt_read_internal(int idx);
 *
 * Given `idx` (0..15), read the corresponding group 1 AMU counter and
 * return the 64-bit value in `r0` (low word) / `r1` (high word), the
 * AAPCS32 location for a 64-bit return value.
 * Clobbers: r0, r1, r2.
 */
func amu_group1_cnt_read_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 15]: any bit above bit 3 means out of range */
mov r2, r0
lsr r2, r2, #4
cmp r2, #0
ASM_ASSERT(eq)
#endif
/*
 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
 * in the table below, then jump to it (computed branch: idx * 8 bytes
 * past label 1).
 * NOTE(review): assumes every ldcopr16/bx lr entry is exactly 8 bytes
 * (A32 state) — confirm the build rules.
 */
adr r1, 1f
lsl r0, r0, #3 /* each ldcopr16/bx lr sequence is 8 bytes */
add r1, r1, r0
bx r1
1:
ldcopr16 r0,r1, AMEVCNTR10 /* index 0 */
bx lr
ldcopr16 r0,r1, AMEVCNTR11 /* index 1 */
bx lr
ldcopr16 r0,r1, AMEVCNTR12 /* index 2 */
bx lr
ldcopr16 r0,r1, AMEVCNTR13 /* index 3 */
bx lr
ldcopr16 r0,r1, AMEVCNTR14 /* index 4 */
bx lr
ldcopr16 r0,r1, AMEVCNTR15 /* index 5 */
bx lr
ldcopr16 r0,r1, AMEVCNTR16 /* index 6 */
bx lr
ldcopr16 r0,r1, AMEVCNTR17 /* index 7 */
bx lr
ldcopr16 r0,r1, AMEVCNTR18 /* index 8 */
bx lr
ldcopr16 r0,r1, AMEVCNTR19 /* index 9 */
bx lr
ldcopr16 r0,r1, AMEVCNTR1A /* index 10 */
bx lr
ldcopr16 r0,r1, AMEVCNTR1B /* index 11 */
bx lr
ldcopr16 r0,r1, AMEVCNTR1C /* index 12 */
bx lr
ldcopr16 r0,r1, AMEVCNTR1D /* index 13 */
bx lr
ldcopr16 r0,r1, AMEVCNTR1E /* index 14 */
bx lr
ldcopr16 r0,r1, AMEVCNTR1F /* index 15 */
bx lr
endfunc amu_group1_cnt_read_internal
/*
 * void amu_group1_cnt_write_internal(int idx, uint64_t val);
 *
 * Given `idx` (0..15), write the 64-bit `val` to the corresponding
 * group 1 AMU counter.
 * Clobbers: r0, r2.
 */
func amu_group1_cnt_write_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 15]: any bit above bit 3 means out of range */
mov r2, r0
lsr r2, r2, #4
cmp r2, #0
ASM_ASSERT(eq)
#endif
/*
 * Given `idx` calculate address of stcopr16/bx lr instruction pair
 * in the table below, then jump to it (computed branch: idx * 8 bytes
 * past label 1).
 * NOTE(review): assumes every stcopr16/bx lr entry is exactly 8 bytes
 * (A32 state) — confirm the build rules.
 */
adr r2, 1f
lsl r0, r0, #3 /* each stcopr16/bx lr sequence is 8 bytes */
add r2, r2, r0
bx r2
1:
stcopr16 r0,r1, AMEVCNTR10 /* index 0 */
bx lr
stcopr16 r0,r1, AMEVCNTR11 /* index 1 */
bx lr
stcopr16 r0,r1, AMEVCNTR12 /* index 2 */
bx lr
stcopr16 r0,r1, AMEVCNTR13 /* index 3 */
bx lr
stcopr16 r0,r1, AMEVCNTR14 /* index 4 */
bx lr
stcopr16 r0,r1, AMEVCNTR15 /* index 5 */
bx lr
stcopr16 r0,r1, AMEVCNTR16 /* index 6 */
bx lr
stcopr16 r0,r1, AMEVCNTR17 /* index 7 */
bx lr
stcopr16 r0,r1, AMEVCNTR18 /* index 8 */
bx lr
stcopr16 r0,r1, AMEVCNTR19 /* index 9 */
bx lr
stcopr16 r0,r1, AMEVCNTR1A /* index 10 */
bx lr
stcopr16 r0,r1, AMEVCNTR1B /* index 11 */
bx lr
stcopr16 r0,r1, AMEVCNTR1C /* index 12 */
bx lr
stcopr16 r0,r1, AMEVCNTR1D /* index 13 */
bx lr
stcopr16 r0,r1, AMEVCNTR1E /* index 14 */
bx lr
stcopr16 r0,r1, AMEVCNTR1F /* index 15 */
bx lr
endfunc amu_group1_cnt_write_internal
/*
 * void amu_group1_set_evtype_internal(int idx, unsigned int val);
 *
 * Program the group 1 AMU event type register selected by `idx`
 * (0..15) with the event number `val` (must fit in 16 bits).
 * Clobbers: r0, r2.
 */
func amu_group1_set_evtype_internal
#if ENABLE_ASSERTIONS
/* `idx` should be between [0, 15]: any bit above bit 3 means out of range */
mov r2, r0
lsr r2, r2, #4
cmp r2, #0
ASM_ASSERT(eq)
/* val should be between [0, 65535]: only the low 16 bits may be set */
mov r2, r1
lsr r2, r2, #16
cmp r2, #0
ASM_ASSERT(eq)
#endif
/*
 * Given `idx` calculate address of stcopr/bx lr instruction pair
 * in the table below, then jump to it (computed branch: idx * 8 bytes
 * past label 1).
 * NOTE(review): assumes every stcopr/bx lr entry is exactly 8 bytes
 * (A32 state) — confirm the build rules.  Also note the stcopr below
 * writes r0; presumably the stcopr macro's register operand maps `val`
 * correctly — verify against the macro definition, since `val` arrives
 * in r1.
 */
adr r2, 1f
lsl r0, r0, #3 /* each stcopr/bx lr sequence is 8 bytes */
add r2, r2, r0
bx r2
1:
stcopr r0, AMEVTYPER10 /* index 0 */
bx lr
stcopr r0, AMEVTYPER11 /* index 1 */
bx lr
stcopr r0, AMEVTYPER12 /* index 2 */
bx lr
stcopr r0, AMEVTYPER13 /* index 3 */
bx lr
stcopr r0, AMEVTYPER14 /* index 4 */
bx lr
stcopr r0, AMEVTYPER15 /* index 5 */
bx lr
stcopr r0, AMEVTYPER16 /* index 6 */
bx lr
stcopr r0, AMEVTYPER17 /* index 7 */
bx lr
stcopr r0, AMEVTYPER18 /* index 8 */
bx lr
stcopr r0, AMEVTYPER19 /* index 9 */
bx lr
stcopr r0, AMEVTYPER1A /* index 10 */
bx lr
stcopr r0, AMEVTYPER1B /* index 11 */
bx lr
stcopr r0, AMEVTYPER1C /* index 12 */
bx lr
stcopr r0, AMEVTYPER1D /* index 13 */
bx lr
stcopr r0, AMEVTYPER1E /* index 14 */
bx lr
stcopr r0, AMEVTYPER1F /* index 15 */
bx lr
endfunc amu_group1_set_evtype_internal
......@@ -172,7 +172,6 @@ static void *amu_context_restore(const void *arg)
for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
amu_group1_cnt_write(i, ctx->group1_cnts[i]);
isb();
/* Restore group 0/1 counter configuration */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment