/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <denver.h>
#include <errno.h>
#include <lib/mmio.h>
#include <mce_private.h>

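/*
 * Low-level helpers, implemented in assembly, to send requests and read
 * back results over the NVG communication channel.
 */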
extern void nvg_set_request_data(uint64_t req, uint64_t data);
extern void nvg_set_request(uint64_t req);
extern uint64_t nvg_get_result(void);

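/*
 * Request the core to enter the specified power state, after programming
 * the time until the next expected wake event.
 */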
int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
	/* check for allowed power state */
	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
		ERROR("%s: unknown cstate (%d)\n", __func__, state);
		return EINVAL;
	}

	/* time (TSC ticks) until the core is expected to get a wake event */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);

	/* set the core cstate */
	write_actlr_el1(state);

	return 0;
}

/*
 * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
 * SYSTEM_CSTATE values.
 */
int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
		uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
		uint8_t update_wake_mask)
{
	uint64_t val = 0;

	/* update CLUSTER_CSTATE? */
	if (cluster)
		val |= (cluster & CLUSTER_CSTATE_MASK) |
			CLUSTER_CSTATE_UPDATE_BIT;

	/* update CCPLEX_CSTATE? */
	if (ccplex)
		val |= (ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT |
			CCPLEX_CSTATE_UPDATE_BIT;

	/* update SYSTEM_CSTATE? */
	if (system)
		val |= ((system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
		       ((sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
			SYSTEM_CSTATE_UPDATE_BIT);

	/* update wake mask value? */
	if (update_wake_mask)
		val |= CSTATE_WAKE_MASK_UPDATE_BIT;

	/* set the wake mask */
	val &= CSTATE_WAKE_MASK_CLEAR;
	val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);

	/* set the updated cstate info */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);

	return 0;
}

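/*
 * Update the crossover threshold time for the specified crossover type.
 */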
int nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
{
	/* sanity check crossover type */
	if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)
		return EINVAL;

	/*
	 * The crossover threshold limit types start from
	 * TEGRA_CROSSOVER_TYPE_C1_C6 to TEGRA_CROSSOVER_TYPE_CCP3_SC7. The
	 * command indices for updating the threshold can be generated
	 * by adding the type to the NVG_SET_THRESHOLD_CROSSOVER_C1_C6
	 * command index.
	 */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 + type,
		(uint64_t)time);

	return 0;
}

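/*
 * Read the cstate statistics counter for the specified state index.
 */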
uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
{
	/* sanity check state */
	if (state == 0)
		return EINVAL;

	/*
	 * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
	 * reading the threshold can be generated by adding the type to
	 * the NVG_CLEAR_CSTATE_STATS command index.
	 */
	nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state);

	return nvg_get_result();
}

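/*
 * Write a value to the cstate statistics counter for the specified
 * state index.
 */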
int nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
{
	uint64_t val;

	/*
	 * The only difference between a CSTATE_STATS_WRITE and
	 * CSTATE_STATS_READ is the usage of the 63:32 in the request.
	 * 63:32 are set to '0' for a read, while a write contains the
	 * actual stats value to be written.
	 */
	val = ((uint64_t)stats << MCE_CSTATE_STATS_TYPE_SHIFT) | state;

	/*
	 * The cstate types start from NVG_READ_CSTATE_STATS_SC7_ENTRIES
	 * to NVG_GET_LAST_CSTATE_ENTRY_A57_3. The command indices for
	 * reading the threshold can be generated by adding the type to
	 * the NVG_CLEAR_CSTATE_STATS command index.
	 */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR + state, val);

	return 0;
}

int nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
	/* This does not apply to the Denver cluster */
	return 0;
}

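/*
 * Ask the MCE firmware whether the CCPLEX is allowed to enter SC7, given
 * this core's target cstate and expected wake time.
 */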
int nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
{
	uint64_t val;

	/* check for allowed power state */
	if (state != TEGRA_ARI_CORE_C0 && state != TEGRA_ARI_CORE_C1 &&
	    state != TEGRA_ARI_CORE_C6 && state != TEGRA_ARI_CORE_C7) {
		ERROR("%s: unknown cstate (%d)\n", __func__, state);
		return EINVAL;
	}

	/*
	 * Request format -
	 * 63:32 = wake time
	 * 31:0 = C-state for this core
	 */
	val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
			(state & MCE_SC7_ALLOWED_MASK);

	/* issue command to check if SC7 is allowed */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);

	/* 1 = SC7 allowed, 0 = SC7 not allowed */
	return !!nvg_get_result();
}

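/*
 * Request that another core be brought online. A core cannot online
 * itself.
 */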
int nvg_online_core(uint32_t ari_base, uint32_t core)
{
	int cpu = read_mpidr() & MPIDR_CPU_MASK;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/* sanity check core id */
	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
		ERROR("%s: unsupported core id (%d)\n", __func__, core);
		return EINVAL;
	}

	/*
	 * The Denver cluster has 2 CPUs only - 0, 1.
	 */
	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
		ERROR("%s: unknown core id (%d)\n", __func__, core);
		return EINVAL;
	}

	/* get a core online */
	nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE, core & MCE_CORE_ID_MASK);

	return 0;
}

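/*
 * Enable or disable the Auto-CC3 feature with the specified frequency
 * and voltage settings.
 */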
int nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
{
	uint64_t val;

	/*
	 * If the enable bit is cleared, Auto-CC3 will be disabled by setting
	 * the SW visible voltage/frequency request registers for all non
	 * floorswept cores valid independent of StandbyWFI and disabling
	 * the IDLE voltage/frequency request register. If set, Auto-CC3
	 * will be enabled by setting the ARM SW visible voltage/frequency
	 * request registers for all non floorswept cores to be enabled by
	 * StandbyWFI or the equivalent signal, and always keeping the IDLE
	 * voltage/frequency request register enabled.
	 */
	val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
		((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
		(enable ? MCE_AUTO_CC3_ENABLE_BIT : 0));

	nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, val);

	return 0;
}