/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

/* Per-core buffer used to save and restore the AMU counter state over power down */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
bool amu_supported(void)
{
	uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;

	features &= ID_PFR0_AMU_MASK;
	return ((features == 1U) || (features == 2U));
}

#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
	uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
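	/* AMCFGR.NCG holds the number of implemented counter groups minus one */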

	return (features & AMCFGR_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
				AMCGCR_CG1NC_MASK;
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		uint64_t v;
		/*
		 * Non-secure accesses from EL0 or EL1 to the Activity Monitor
		 * registers do not trap to EL2.
		 */
		v = read_hcptr();
		v &= ~TAM_BIT;
		write_hcptr(v);
	}

	/* Enable group 0 counters */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
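	/* ISB so that the counter write has taken effect before returning */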
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
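		/* Skip counters the platform excluded from AMU_GROUP1_COUNTERS_MASK */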
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (!amu_supported()) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

/* Save the AMU context before a power-down suspend and restore it on resume */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);