/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>

#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>

#include <plat/common/platform.h>

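/* Per-core AMU context, saved and restored across power-down suspend. */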
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

/*
 * Get the AMU version from ID_AA64PFR0_EL1.
 * Return values:
 *   ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in Armv8.4)
 *   ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in Armv8.6)
 *   ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
 */
unsigned int amu_get_version(void)
{
	return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
		ID_AA64PFR0_AMU_MASK;
}

#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
bool amu_group1_supported(void)
{
	uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
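
	/*
	 * AMCFGR_EL0.NCG holds the number of implemented counter groups minus
	 * one, so a value of 1 means group 1 is implemented in addition to
	 * group 0.
	 */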
	return (features & AMCFGR_EL0_NCG_MASK) == 1U;
}
#endif

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
	uint64_t v;
	unsigned int amu_version = amu_get_version();

	if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Check the presence of group 1 counters */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/* Check number of group 1 counters */
	uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
				AMCGCR_EL0_CG1NC_MASK;
	VERBOSE("%s%llu. %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (cnt_num < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%llu is less than %s%u\n",
		"Number of AMU Group 1 Counters ", cnt_num,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * Retrieve and update the CPTR_EL3 value from the context mentioned
	 * in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
	v &= ~TAM_BIT;
	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (amu_version < ID_AA64PFR0_AMU_V1P1) {
		return;
	}

	if (el2_unused) {
		/* Make sure virtual offsets are disabled if EL2 not used. */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL. This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag; when set, system
	 * register reads at lower ELs return zero. Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
#else
	write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
#endif
}

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(idx < AMU_GROUP0_NR_COUNTERS);

	amu_group0_cnt_write_internal(idx, val);
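	/* An ISB ensures subsequent instructions observe the updated counter. */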
	isb();
}

/*
 * Read the group 0 offset register for a given index. Index must be 0, 2,
 * or 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
uint64_t amu_group0_voffset_read(unsigned int idx)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);
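	/* Counter 1, the constant frequency cycle counter, has no offset. */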

	return amu_group0_voffset_read_internal(idx);
}

/*
 * Write the group 0 offset register for a given index. Index must be 0, 2, or
 * 3; the register for index 1 does not exist.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(idx < AMU_GROUP0_NR_COUNTERS);
	assert(idx != 1U);

	amu_group0_voffset_write_internal(idx, val);
	isb();
}

#if AMU_GROUP1_NR_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(unsigned int idx)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Read the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
uint64_t amu_group1_voffset_read(unsigned int idx)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
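	/* AMCG1IDR_EL0.VOFF flags which group 1 counters implement an offset. */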
	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		(1ULL << idx)) != 0ULL);

	return amu_group1_voffset_read_internal(idx);
}

/*
 * Write the group 1 offset register for a given index.
 *
 * Using this function requires FEAT_AMUv1p1 support.
 */
void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
	assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);
	assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
		(1ULL << idx)) != 0ULL);

	amu_group1_voffset_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`.
 */
void amu_group1_set_evtype(unsigned int idx, unsigned int val)
{
	assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
	assert(amu_group1_supported());
	assert(idx < AMU_GROUP1_NR_COUNTERS);

	amu_group1_set_evtype_internal(idx, val);
	isb();
}
#endif	/* AMU_GROUP1_NR_COUNTERS */

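/*
 * Save this core's AMU counters, and virtual offsets where implemented, into
 * its amu_ctx slot. Subscribed below to the PSCI suspend power-down start
 * event.
 */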
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Assert that group 0/1 counter configuration is what we expect */
	assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
#endif
	/*
	 * Disable group 0/1 counters so that other observers, like the SCP,
	 * do not sample counter values from the future via the memory mapped
	 * view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif
	isb();

	/* Save all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

	/* Save group 0 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		/*
		 * Not using a loop because the count is fixed and index 1
		 * does not exist.
		 */
		ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
		ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
		ctx->group0_voffsets[2U] = amu_group0_voffset_read(3U);
	}

#if AMU_GROUP1_NR_COUNTERS
	/* Save group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			ctx->group1_cnts[i] = amu_group1_cnt_read(i);
		}
	}

	/* Save group 1 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		u_register_t amcg1idr = read_amcg1idr_el0() >>
			AMCG1IDR_VOFF_SHIFT;
		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				ctx->group1_voffsets[i] =
					amu_group1_voffset_read(i);
			}
		}
	}
#endif
	return (void *)0;
}

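/*
 * Restore the AMU state saved by amu_context_save(). Subscribed below to the
 * PSCI suspend power-down finish event.
 */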
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	unsigned int i;

	if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
		return (void *)-1;
	}

#if AMU_GROUP1_NR_COUNTERS
	if (!amu_group1_supported()) {
		return (void *)-1;
	}
#endif
	/* Counters were disabled in `amu_context_save()` */
	assert(read_amcntenset0_el0() == 0U);

#if AMU_GROUP1_NR_COUNTERS
	assert(read_amcntenset1_el0() == 0U);
#endif

	/* Restore all group 0 counters */
	for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

	/* Restore group 0 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		/*
		 * Not using a loop because the count is fixed and index 1
		 * does not exist.
		 */
		amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
		amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
		amu_group0_voffset_write(3U, ctx->group0_voffsets[2U]);
	}

	/* Restore group 0 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Restore group 1 counters */
	for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
		if ((AMU_GROUP1_COUNTERS_MASK & (1UL << i)) != 0U) {
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);
		}
	}

	/* Restore group 1 virtual offsets if supported and enabled. */
	if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
			((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
		u_register_t amcg1idr = read_amcg1idr_el0() >>
			AMCG1IDR_VOFF_SHIFT;
		amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;

		for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
			if (((amcg1idr >> i) & 1ULL) != 0ULL) {
				amu_group1_voffset_write(i,
					ctx->group1_voffsets[i]);
			}
		}
	}

	/* Restore group 1 counter configuration */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
#endif

	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);