/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>
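
/*
 * Per-core AMU context, saved before a core is powered down and restored
 * after it resumes.
 */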
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/*
 * Get AMU version value from ID_AA64PFR0_EL1.
 * Return values:
 *   ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
 *   ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
 *   ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
 */
unsigned int amu_get_version(void)
{
	return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
	uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;

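	/*
	 * AMCFGR_EL0.NCG holds the number of implemented counter groups
	 * minus one, so a value of 1 means the auxiliary group 1 exists in
	 * addition to group 0.
	 */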
	return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint64_t v;
	unsigned int amu_version = amu_get_version();

	if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check and set presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
				AMCGCR_EL0_CG1NC_MASK;
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
			"Number of AMU Group 1 Counters ", cnt_num,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so that any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (amu_version < ID_AA64PFR0_AMU_V1P1) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
#else
	write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val` */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
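	/* Ensure the counter write has taken effect before returning. */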
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the offset register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx` */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val` */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
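	/* AMCG1IDR_EL0 reports which counters implement virtual offsets. */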
	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		(1ULL << idx)) != 0ULL);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		(1ULL << idx)) != 0ULL);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`.
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */
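
/*
 * Save this core's AMU context on the psci_suspend_pwrdown_start event,
 * before the core is powered down.
 */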
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters to avoid other observers like SCP sampling
	 * counter values from the future via the memory mapped view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
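	/* Ensure the disables take effect before the counters are read. */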
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		/* No loop: the count is fixed and index 1 does not exist. */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}

	/* Save group 1 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		u_register_t amcg1idr = read_amcg1idr_el0() >>
			AMCG1IDR_VOFF_SHIFT;
		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				ctx->group1_voffsets[i] =
					amu_group1_voffset_read(i);
			}
		}
	}
#endif
	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
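	/*
	 * Invoked on the psci_suspend_pwrdown_finish event: write back the
	 * values saved in amu_context_save() and re-enable the counters.
	 */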
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		/* No loop: the count is fixed and index 1 does not exist. */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		u_register_t amcg1idr = read_amcg1idr_el0() >>
			AMCG1IDR_VOFF_SHIFT;
		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				amu_group1_voffset_write(i,
					ctx->group1_voffsets[i]);
			}
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}
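
/* Hook the AMU save/restore handlers into the PSCI suspend sequence. */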

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);