Commit 432b9905 authored by Achin Gupta

Merge pull request #361 from achingupta/for_sm/psci_proto_v5

For sm/psci proto v5
parents 9caf7e36 9d070b99
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,50 +45,120 @@
*/
const spd_pm_ops_t *psci_spd_pm;
/*
* PSCI requested local power state map. This array is used to store the local
* power states requested by a CPU for power levels from level 1 to
* PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
* level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
* CPU are the same.
*
* During state coordination, the platform is passed an array containing the
* local states requested for a particular non cpu power domain by each cpu
* within the domain.
*
* TODO: Dense packing of the requested states will cause cache thrashing
* when multiple power domains write to it. If we allocate the requested
* states at each power level in a cache-line aligned per-domain memory,
* the cache thrashing can be avoided.
*/
static plat_local_state_t
psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
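/*
 * Layout example (a sketch, assuming a platform with two clusters of
 * four CPUs each and PLAT_MAX_PWR_LVL == 1): row 0 of this array holds
 * the cluster-level state requested by each CPU c in 0..7, i.e.
 * psci_req_local_pwr_states[0][c]. There is no row for power level 0
 * because a CPU's requested and target states coincide.
 */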
/*******************************************************************************
* Arrays that hold the platform's power domain tree information for state
* management of power domains.
* Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
* which is an ancestor of a CPU power domain.
* Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;
cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Pointer to functions exported by the platform to complete power mgmt. ops
******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;
/******************************************************************************
* Check that the maximum power level supported by the platform makes sense
*****************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
assert_platform_max_pwrlvl_check);
/*
* The plat_local_state used by the platform is one of these types: RUN,
* RETENTION and OFF. The platform can define further sub-states for each type
* apart from RUN. This categorization is done to verify the sanity of the
* psci_power_state passed by the platform and to print debug information. The
* categorization is done on the basis of the following conditions:
*
* 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
*
* 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
* STATE_TYPE_RETN.
*
* 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
* STATE_TYPE_OFF.
*/
typedef enum plat_local_state_type {
STATE_TYPE_RUN = 0,
STATE_TYPE_RETN,
STATE_TYPE_OFF
} plat_local_state_type_t;
/* The macro used to categorize plat_local_state. */
#define find_local_state_type(plat_local_state) \
	((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE) \
	? STATE_TYPE_OFF : STATE_TYPE_RETN) \
	: STATE_TYPE_RUN)
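/*
 * Worked example (illustrative only, assuming a platform where
 * PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2): the macro then
 * categorizes 0 as STATE_TYPE_RUN, 1 as STATE_TYPE_RETN and 2 as
 * STATE_TYPE_OFF. The checks below are a sketch and not part of the
 * original file.
 */
#if 0
CASSERT(find_local_state_type(0) == STATE_TYPE_RUN, example_type_run);
CASSERT(find_local_state_type(1) == STATE_TYPE_RETN, example_type_retn);
CASSERT(find_local_state_type(2) == STATE_TYPE_OFF, example_type_off);
#endif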
/******************************************************************************
* Check that the maximum retention level supported by the platform is less
* than the maximum off level.
*****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
assert_platform_max_off_and_retn_state_check);
/******************************************************************************
* This function ensures that the power state parameter in a CPU_SUSPEND request
* is valid. If so, it returns the requested states for each power level.
*****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info)
{
/* Check SBZ bits in power state are zero */
if (psci_check_power_state(power_state))
return PSCI_E_INVALID_PARAMS;
assert(psci_plat_pm_ops->validate_power_state);
/* Validate the power_state using platform pm_ops */
return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}
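/*
 * Illustrative sketch (hypothetical helper, not part of this file): it
 * composes a PSCI 0.2 format 'power_state' value from its fields, i.e.
 * StateID in bits[15:0], StateType in bit[16] and the power level in
 * bits[25:24]. A platform's validate_power_state() hook would parse
 * these fields back out into a psci_power_state_t.
 */
static inline unsigned int compose_power_state_sketch(unsigned int state_id,
						      unsigned int type,
						      unsigned int pwrlvl)
{
	return (state_id & 0xffff) | ((type & 0x1) << 16) |
	       ((pwrlvl & 0x3) << 24);
}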
/******************************************************************************
* This function retrieves the `psci_power_state_t` for system suspend from
* the platform.
*****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
/*
* Assert that the required pm_ops hook is implemented to ensure that
* the capability detected during psci_setup() is valid.
*/
assert(psci_plat_pm_ops->get_sys_suspend_power_state);
/*
* Query the platform for the power_state required for system suspend
*/
psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}
/*******************************************************************************
@@ -99,24 +169,15 @@ uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
unsigned int cpu_idx, my_idx = plat_my_core_pos();
for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
if (cpu_idx == my_idx) {
assert(psci_get_aff_info_state() == AFF_STATE_ON);
continue;
}
if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
return 0;
}
@@ -124,205 +185,418 @@ unsigned int psci_is_last_on_cpu(void)
}
/*******************************************************************************
* Routine to return the maximum power level to traverse to after a cpu has
* been physically powered up. It is expected to be called immediately after
* reset from assembler code.
******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
unsigned int pwrlvl;
/*
* Assume that this cpu was suspended and retrieve its target power
* level. If it is invalid then it could only have been turned off
* earlier. PLAT_MAX_PWR_LVL will be the highest power level a
* cpu can be turned off to.
*/
pwrlvl = psci_get_suspend_pwrlvl();
if (pwrlvl == PSCI_INVALID_PWR_LVL)
pwrlvl = PLAT_MAX_PWR_LVL;
return pwrlvl;
}
/******************************************************************************
* Helper function to update the requested local power state array. This array
* does not store the requested state for the CPU power level. Hence an
* assertion is added to prevent us from accessing the wrong index.
*****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
unsigned int cpu_idx,
plat_local_state_t req_pwr_state)
{
assert(pwrlvl > PSCI_CPU_PWR_LVL);
psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
}
/******************************************************************************
* This function initializes the psci_req_local_pwr_states.
*****************************************************************************/
void psci_init_req_local_pwr_states(void)
{
/* Initialize the requested state of all non CPU power domains as OFF */
memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
sizeof(psci_req_local_pwr_states));
}
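/*
 * A note on the byte-wise memset() above: it assumes that
 * plat_local_state_t is one byte wide, so every element of the 2D array
 * receives the value PLAT_MAX_OFF_STATE. The check below is a sketch
 * and not part of the original file.
 */
#if 0
CASSERT(sizeof(plat_local_state_t) == 1, example_local_state_is_a_byte);
#endif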
/******************************************************************************
* Helper function to return a reference to an array containing the local power
* states requested by each cpu for a power domain at 'pwrlvl'. The size of the
* array will be the number of cpu power domains of which this power domain is
* an ancestor. These requested states will be used to determine a suitable
* target state for this power domain during psci state coordination. An
* assertion is added to prevent us from accessing the CPU power level.
*****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
unsigned int cpu_idx)
{
assert(pwrlvl > PSCI_CPU_PWR_LVL);
return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
}
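/*
 * Indexing example (a sketch): the states requested for power level
 * 'lvl' live in row 'lvl - 1' because level 0 is not tracked. On a
 * platform where level 1 is the cluster, the states requested for a
 * cluster whose first CPU has index 4 would be obtained with:
 *
 *     plat_local_state_t *states = psci_get_req_local_pwr_states(1, 4);
 */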
/******************************************************************************
* Helper function to return the current local power state of each power domain
* from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
* function will be called after a cpu is powered on to find the local state
* each power domain has emerged from.
*****************************************************************************/
static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
psci_power_state_t *target_state)
{
unsigned int parent_idx, lvl;
plat_local_state_t *pd_state = target_state->pwr_domain_state;
pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
/* Copy the local power state from node to state_info */
for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
#if !USE_COHERENT_MEM
/*
* If using normal memory for psci_non_cpu_pd_nodes, we need
* to flush before reading the local power state as another
* cpu in the same power domain could have updated it and this
* code runs before caches are enabled.
*/
flush_dcache_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
/* Set the higher levels to RUN */
for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
/******************************************************************************
* Helper function to set the target local power state that each power domain
* from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
* enter. This function will be called after coordination of requested power
* states has been done for each power level.
*****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
const psci_power_state_t *target_state)
{
unsigned int parent_idx, lvl;
const plat_local_state_t *pd_state = target_state->pwr_domain_state;
psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
/*
* Need to flush as local_state will be accessed with Data Cache
* disabled during power on
*/
flush_cpu_data(psci_svc_cpu_data.local_state);
parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
/* Copy the local_state from state_info */
for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
#if !USE_COHERENT_MEM
flush_dcache_range(
(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
}
/*******************************************************************************
* PSCI helper function to get the parent nodes corresponding to a cpu_index.
******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int end_lvl,
unsigned int node_index[])
{
unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
int i;
for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
*node_index++ = parent_node;
parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
}
}
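/*
 * Usage sketch (hypothetical caller): gather the indices of all
 * ancestor power domains of the current CPU up to the highest platform
 * power level. Illustrative only and not part of the original file.
 */
#if 0
static void parent_nodes_usage_sketch(void)
{
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	psci_get_parent_pwr_domain_nodes(plat_my_core_pos(),
					 PLAT_MAX_PWR_LVL,
					 parent_nodes);
}
#endif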
/******************************************************************************
* This function is invoked post CPU power up and initialization. It sets the
* affinity info state, target power state and requested power state for the
* current CPU and all its ancestor power domains to RUN.
*****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Reset the local_state to RUN for the non cpu power domains. */
for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
psci_non_cpu_pd_nodes[parent_idx].local_state =
PSCI_LOCAL_STATE_RUN;
#if !USE_COHERENT_MEM
flush_dcache_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
psci_set_req_local_pwr_state(lvl,
cpu_idx,
PSCI_LOCAL_STATE_RUN);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
/* Set the affinity info state to ON */
psci_set_aff_info_state(AFF_STATE_ON);
psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
flush_cpu_data(psci_svc_cpu_data);
}
/******************************************************************************
* This function is passed the local power states requested for each power
* domain (state_info) between the current CPU domain and its ancestors until
* the target power level (end_pwrlvl). It updates the array of requested power
* states with this information.
*
* Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
* retrieves the states requested by all the cpus of which the power domain at
* that level is an ancestor. It passes this information to the platform to
* coordinate and return the target power state. If the target state for a level
* is RUN then subsequent levels are not considered. At the CPU level, state
* coordination is not required. Hence, the requested and the target states are
* the same.
*
* The 'state_info' is updated with the target state for each level between the
* CPU and the 'end_pwrlvl' and returned to the caller.
*
* This function will only be invoked with data cache enabled and while
* powering down a core.
*****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info)
{
unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
unsigned int start_idx, ncpus;
plat_local_state_t target_state, *req_states;
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/*
 * For level 0, the requested state will be equivalent
 * to the target state.
 */
for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
/* First update the requested power state */
psci_set_req_local_pwr_state(lvl, cpu_idx,
state_info->pwr_domain_state[lvl]);
/* Get the requested power states for this power level */
start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
req_states = psci_get_req_local_pwr_states(lvl, start_idx);
/*
* Let the platform coordinate amongst the requested states at
* this power level and return the target local power state.
*/
ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
target_state = plat_get_target_pwr_state(lvl,
req_states,
ncpus);
state_info->pwr_domain_state[lvl] = target_state;
/* Break early if the negotiated target power state is RUN */
if (is_local_state_run(state_info->pwr_domain_state[lvl]))
break;
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
/*
 * This is for cases when we break out of the above loop early because
 * the target power state is RUN at a power level < end_pwrlvl.
 * We update the requested power state from state_info and then
 * set the target state as RUN.
 */
for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
psci_set_req_local_pwr_state(lvl, cpu_idx,
state_info->pwr_domain_state[lvl]);
state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
/* Update the target state in the power domain nodes */
psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
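/*
 * A minimal sketch of a platform coordination handler, modelled on the
 * generic default implementation (placement here is illustrative):
 * assuming deeper local states are encoded as numerically higher
 * values, the shallowest state requested by any CPU wins, so the target
 * is simply the minimum of the requested states.
 */
#if 0
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}
#endif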
/******************************************************************************
* This function validates a suspend request by making sure that if a standby
* state is requested then no power level is turned off and the highest power
* level is placed in a standby/retention state.
*
* It also ensures that the state which level X will enter is not shallower
* than the state which level X + 1 will enter.
*
* This validation will be enabled only for DEBUG builds as the platform is
* expected to perform these validations as well.
*****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
unsigned int max_off_lvl, target_lvl, max_retn_lvl;
plat_local_state_t state;
plat_local_state_type_t req_state_type, deepest_state_type;
int i;
/* Find the target suspend power level */
target_lvl = psci_find_target_suspend_lvl(state_info);
if (target_lvl == PSCI_INVALID_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* All power domain levels are in a RUN state to begin with */
deepest_state_type = STATE_TYPE_RUN;
for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
state = state_info->pwr_domain_state[i];
req_state_type = find_local_state_type(state);
/*
* While traversing from the highest power level to the lowest,
* the state requested for lower levels has to be the same or
* deeper i.e. equal to or greater than the state at the higher
* levels. If this condition is true, then the requested state
* becomes the deepest state encountered so far.
*/
if (req_state_type < deepest_state_type)
return PSCI_E_INVALID_PARAMS;
deepest_state_type = req_state_type;
}
/* Find the highest off power level */
max_off_lvl = psci_find_max_off_lvl(state_info);
/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
max_retn_lvl = PSCI_INVALID_PWR_LVL;
if (target_lvl != max_off_lvl)
max_retn_lvl = target_lvl;
/*
* If this is not a request for a power down state then max off level
* has to be invalid and max retention level has to be a valid power
* level.
*/
if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
max_retn_lvl == PSCI_INVALID_PWR_LVL))
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
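/*
 * Worked example (a sketch, assuming PLAT_MAX_PWR_LVL == 1 with
 * RETENTION encoded as 1 and OFF as 2): requests of
 * {RETENTION, RETENTION} or {OFF, OFF} are monotonic and pass, whereas
 * {RETENTION, OFF} asks level 0 for a shallower state than level 1 and
 * is rejected.
 */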
/******************************************************************************
* This function finds the highest power level which will be powered down
* amongst all the power levels specified in the 'state_info' structure
*****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
int i;
for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
if (is_local_state_off(state_info->pwr_domain_state[i]))
return i;
}
return PSCI_INVALID_PWR_LVL;
}
/******************************************************************************
* This functions finds the level of the highest power domain which will be
* placed in a low power state during a suspend operation.
*****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
int i;
for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
if (!is_local_state_run(state_info->pwr_domain_state[i]))
return i;
}
return PSCI_INVALID_PWR_LVL;
}
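/*
 * Example contrasting the two finders above (a sketch, assuming
 * PLAT_MAX_PWR_LVL == 1): for pwr_domain_state = {OFF, RETENTION},
 * psci_find_max_off_lvl() returns 0 since only the CPU level is OFF,
 * while psci_find_target_suspend_lvl() returns 1 since the cluster
 * level is the highest level not in RUN.
 */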
/*******************************************************************************
* This function is passed a cpu_index and the highest level in the topology
* tree that the operation should be applied to. It picks up locks in order of
* increasing power domain level in the range specified.
******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx)
{
unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
unsigned int level;
/* No locking required for level 0. Hence start locking from level 1 */
for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
}
/*******************************************************************************
* This function is passed a cpu_index and the highest level in the topology
* tree that the operation should be applied to. It releases the locks in order
* of decreasing power domain level in the range specified.
******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx)
{
unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
int level;
/* Get the parent nodes */
psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
/* Unlock top down. No unlocking required for level 0. */
for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
parent_idx = parent_nodes[level - 1];
psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
}
}
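/*
 * Typical pairing of the two helpers above (a sketch of a power-down
 * sequence; the surrounding logic is elided and the function name is
 * hypothetical):
 */
#if 0
static void lock_pairing_sketch(unsigned int end_pwrlvl)
{
	unsigned int cpu_idx = plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);
	psci_do_state_coordination(end_pwrlvl, &state_info);
	/* ... program the platform power controller here ... */
	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}
#endif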
/*******************************************************************************
* Simple routine to determine whether a mpidr is valid or not.
******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
if (plat_core_pos_by_mpidr(mpidr) < 0)
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* This function determines the full entrypoint information for the requested
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
unsigned long ep_attr, sctlr;
unsigned int daif, ee, mode;
unsigned long ns_scr_el3 = read_scr_el3();
unsigned long ns_sctlr_el1 = read_sctlr_el1();
sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
ee = 0;
@@ -349,7 +623,7 @@ int psci_get_ns_ep_info(entry_point_info_t *ep,
* aarch64 EL
*/
if (entrypoint & 0x1)
return PSCI_E_INVALID_ADDRESS;
mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
@@ -371,209 +645,100 @@ int psci_get_ns_ep_info(entry_point_info_t *ep,
}
/*******************************************************************************
* This function validates the entrypoint with the platform layer if the
* appropriate pm_ops hook is exported by the platform and returns the
* 'entry_point_info'.
******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
/* Validate the entrypoint using platform psci_ops */
if (psci_plat_pm_ops->validate_ns_entrypoint) {
rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_ADDRESS;
}
/*
 * Verify and derive the re-entry information for
 * the non-secure world from the non-secure state from
 * where this call originated.
 */
rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
return rc;
}
/*******************************************************************************
* Generic handler which is called when a cpu is physically powered on. It
* traverses the node information and finds the highest power level powered
* off and performs generic, architectural, platform setup and state management
* to power on that power level and power levels below it.
* e.g. For a cpu that's been powered on, it will call the platform specific
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to gic cpu interface.
******************************************************************************/
void psci_power_up_finish(void)
{
unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
/*
* Verify that we have been explicitly turned ON or resumed from
* suspend.
*/
if (psci_get_aff_info_state() == AFF_STATE_OFF) {
ERROR("Unexpected affinity info state");
panic();
}
/*
 * Get the maximum power domain level to traverse to after this cpu
 * has been physically powered up.
 */
end_pwrlvl = get_power_on_target_pwrlvl();
/*
* This function acquires the lock corresponding to each power level so
* that by the time all locks are taken, the system topology is snapshot
* and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl,
cpu_idx);
psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
/*
* This CPU could be resuming from suspend or it could have just been
* turned on. To distinguish between these 2 cases, we examine the
* affinity state of the CPU:
* - If the affinity state is ON_PENDING then it has just been
* turned on.
* - Else it is resuming from suspend.
*
* Depending on the type of warm reset identified, choose the right set
* of power management handler and perform the generic, architecture
* and platform specific handling.
*/
if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
psci_cpu_on_finish(cpu_idx, &state_info);
else
psci_cpu_suspend_finish(cpu_idx, &state_info);
/*
* Set the requested and target state of this CPU and all the higher
* power domains which are ancestors of this CPU to run.
*/
psci_set_pwr_domains_to_run(end_pwrlvl);
/*
* This loop releases the lock corresponding to each power level
* in the reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl,
cpu_idx);
}
/*******************************************************************************
@@ -601,7 +766,7 @@ void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
* is resident through the mpidr parameter. Else the value of the parameter on
* return is undefined.
******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
int rc;
@@ -618,31 +783,123 @@ int psci_spd_migrate_info(uint64_t *mpidr)
/*******************************************************************************
* This function prints the state of all power domains present in the
* system
******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
unsigned int idx;
plat_local_state_t state;
plat_local_state_type_t state_type;
/* This array maps to the PSCI_STATE_X definitions in psci.h */
static const char *psci_state_type_str[] = {
	"ON",
	"RETENTION",
	"OFF",
};
INFO("PSCI Power Domain Map:\n");
for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
idx++) {
state_type = find_local_state_type(
psci_non_cpu_pd_nodes[idx].local_state);
INFO(" Domain Node : Level %u, parent_node %d,"
" State %s (0x%x)\n",
psci_non_cpu_pd_nodes[idx].level,
psci_non_cpu_pd_nodes[idx].parent_node,
psci_state_type_str[state_type],
psci_non_cpu_pd_nodes[idx].local_state);
}
for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
state = psci_get_cpu_local_state_by_idx(idx);
state_type = find_local_state_type(state);
INFO(" CPU Node : MPID 0x%lx, parent_node %d,"
" State %s (0x%x)\n",
psci_cpu_pd_nodes[idx].mpidr,
psci_cpu_pd_nodes[idx].parent_node,
psci_state_type_str[state_type],
psci_get_cpu_local_state_by_idx(idx));
}
#endif
}
#if ENABLE_PLAT_COMPAT
/*******************************************************************************
* PSCI Compatibility helper function to return the 'power_state' parameter of
* the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
* if not invoked within CPU_SUSPEND for the current CPU.
******************************************************************************/
int psci_get_suspend_powerstate(void)
{
/* Sanity check to verify that CPU is within CPU_SUSPEND */
if (psci_get_aff_info_state() == AFF_STATE_ON &&
!is_local_state_run(psci_get_cpu_local_state()))
return psci_power_state_compat[plat_my_core_pos()];
return PSCI_INVALID_DATA;
}
/*******************************************************************************
* PSCI Compatibility helper function to return the state id of the current
* cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
* if not invoked within CPU_SUSPEND for the current CPU.
******************************************************************************/
int psci_get_suspend_stateid(void)
{
unsigned int power_state;
power_state = psci_get_suspend_powerstate();
if (power_state != PSCI_INVALID_DATA)
return psci_get_pstate_id(power_state);
return PSCI_INVALID_DATA;
}
/*******************************************************************************
* PSCI Compatibility helper function to return the state id encoded in the
* 'power_state' parameter of the CPU specified by 'mpidr'. Returns
* PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
int cpu_idx = plat_core_pos_by_mpidr(mpidr);
if (cpu_idx == -1)
return PSCI_INVALID_DATA;
/* Sanity check to verify that the CPU is in CPU_SUSPEND */
if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
return PSCI_INVALID_DATA;
}
/*******************************************************************************
* This function returns highest affinity level which is in OFF
* state. The affinity instance with which the level is associated is
* determined by the caller.
******************************************************************************/
unsigned int psci_get_max_phys_off_afflvl(void)
{
psci_power_state_t state_info;
memset(&state_info, 0, sizeof(state_info));
psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
return psci_find_target_suspend_lvl(&state_info);
}
/*******************************************************************************
* PSCI Compatibility helper function to return target affinity level requested
* for the CPU_SUSPEND. This function assumes affinity levels correspond to
* power domain levels on the platform.
******************************************************************************/
int psci_get_suspend_afflvl(void)
{
return psci_get_suspend_pwrlvl();
}
#endif
@@ -34,25 +34,16 @@
#include <psci.h>
#include <xlat_tables.h>
.globl psci_entrypoint
.globl psci_power_down_wfi
/* --------------------------------------------------------------------
* This CPU has been physically powered up. It is either resuming from
* suspend or has simply been turned on. In both cases, call the power
* on finisher.
* --------------------------------------------------------------------
*/
func psci_entrypoint
/*
* On the warm boot path, most of the EL3 initialisations performed by
* 'el3_entrypoint_common' must be skipped:
@@ -98,19 +89,10 @@ psci_aff_common_finish_entry:
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu
bl psci_power_up_finish
b el3_exit
endfunc psci_entrypoint
/* --------------------------------------------
* This function is called to indicate to the
......
/*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,7 +28,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
@@ -38,14 +37,13 @@
.globl psci_do_pwrup_cache_maintenance
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(unsigned int power level);
*
* This function performs cache maintenance for the specified power
* level. The levels of cache affected are determined by the power
* level which is passed as the argument i.e. level 0 results
* in a flush of the L1 cache. Both the L1 and L2 caches are flushed
* for a higher power level.
*
* Additionally, this function also ensures that stack memory is correctly
* flushed out to avoid coherency issues due to a change in its memory
@@ -56,28 +54,19 @@ func psci_do_pwrdown_cache_maintenance
stp x29, x30, [sp,#-16]!
stp x19, x20, [sp,#-16]!
/* ---------------------------------------------
* Determine how many levels of cache will be
* subject to cache maintenance. Power level
* 0 implies that only the cpu is being powered
* down. Only the L1 data cache needs to be
* flushed to the PoU in this case. For a higher
* power level we are assuming that a flush
* of L1 data and L2 unified cache is enough.
* This information should be provided by the
* platform.
* ---------------------------------------------
*/
cmp w0, #PSCI_CPU_PWR_LVL
b.eq do_core_pwr_dwn
bl prepare_cluster_pwr_dwn
b do_stack_maintenance
@@ -92,8 +81,7 @@ do_core_pwr_dwn:
* ---------------------------------------------
*/
do_stack_maintenance:
bl plat_get_my_stack
/* ---------------------------------------------
* Calculate and store the size of the used
@@ -116,7 +104,6 @@ do_stack_maintenance:
sub x1, sp, x0
bl inv_dcache_range
ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16
ret
@@ -147,8 +134,7 @@ func psci_do_pwrup_cache_maintenance
* stack base address in x0.
* ---------------------------------------------
*/
bl plat_get_my_stack
mov x1, sp
sub x1, x0, x1
mov x0, sp
......
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,56 +35,39 @@
#include <platform.h>
#include <runtime_svc.h>
#include <std_svc.h>
#include <string.h>
#include "psci_private.h"
/*******************************************************************************
* PSCI frontend api for servicing SMCs. Described in the PSCI spec.
******************************************************************************/
int psci_cpu_on(u_register_t target_cpu,
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
unsigned int end_pwrlvl;
entry_point_info_t ep;
/* Determine if the cpu exists or not */
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
/* Validate the entry point and get the entry_point_info */
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/*
* To turn this cpu on, specify which power
* levels need to be turned on
*/
end_pwrlvl = PLAT_MAX_PWR_LVL;
rc = psci_cpu_on_start(target_cpu,
&ep,
end_pwrlvl);
return rc;
}
@@ -94,148 +77,125 @@ unsigned int psci_version(void)
}
int psci_cpu_suspend(unsigned int power_state,
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
unsigned int target_pwrlvl, is_power_down_state;
entry_point_info_t ep;
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
plat_local_state_t cpu_pd_state;
/* Validate the power_state parameter */
rc = psci_validate_power_state(power_state, &state_info);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_INVALID_PARAMS);
return rc;
}
/*
* Get the value of the state type bit from the power state parameter.
*/
is_power_down_state = psci_get_pstate_type(power_state);
/* Sanity check the requested suspend levels */
assert (psci_validate_suspend_req(&state_info, is_power_down_state)
== PSCI_E_SUCCESS);
target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
/* Fast path for CPU standby. */
if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
if (!psci_plat_pm_ops->cpu_standby)
return PSCI_E_INVALID_PARAMS;
/*
* Set the state of the CPU power domain to the platform
* specific retention state and enter the standby state.
*/
cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
psci_set_cpu_local_state(cpu_pd_state);
psci_plat_pm_ops->cpu_standby(cpu_pd_state);
/* Upon exit from standby, set the state back to RUN. */
psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
return PSCI_E_SUCCESS;
}
/*
 * If a power down state has been requested, we need to verify entry
 * point and program entry information.
 */
if (is_power_down_state) {
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
}
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this CPU. This function
* might return if the power down was abandoned for any reason, e.g.
* arrival of an interrupt
*/
psci_cpu_suspend_start(&ep,
target_pwrlvl,
&state_info,
is_power_down_state);
return PSCI_E_SUCCESS;
}
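/*
 * Call-flow sketch (hypothetical values): a non-secure caller issuing
 * CPU_SUSPEND for a power-down state at the cluster level would pass a
 * power_state with StateType set to power down and power level 1;
 * psci_cpu_suspend() above validates it, takes the standby fast path if
 * applicable, and otherwise hands off to psci_cpu_suspend_start().
 */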
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
{
int rc;
psci_power_state_t state_info;
entry_point_info_t ep;
/* Check if the current CPU is the last ON CPU in the system */
if (!psci_is_last_on_cpu())
return PSCI_E_DENIED;
/* Validate the entry point and get the entry_point_info */
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/* Query the psci_power_state for system suspend */
psci_query_sys_suspend_pwrstate(&state_info);
/* Ensure that the psci_power_state makes sense */
assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
== PSCI_E_SUCCESS);
assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this cpu.
* Do what is needed to enter the system suspend state. This function
* might return if the power down was abandoned for any reason, e.g.
* the arrival of an interrupt.
*/
psci_afflvl_suspend(&ep,
MPIDR_AFFLVL0,
PLATFORM_MAX_AFFLVL);
psci_cpu_suspend_start(&ep,
PLAT_MAX_PWR_LVL,
&state_info,
PSTATE_TYPE_POWERDOWN);
/* Reset PSCI power state parameter for the core. */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
return PSCI_E_SUCCESS;
}
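/*
 * Illustrative sketch, not part of this patch: a platform's
 * get_sys_suspend_power_state() hook would typically report the deepest
 * power down state at every level, which is what the asserts in
 * psci_system_suspend() above expect. The function name is hypothetical.
 */
static void example_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	for (i = PSCI_CPU_PWR_LVL; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}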
int psci_cpu_off(void)
{
int rc;
int target_afflvl = PLATFORM_MAX_AFFLVL;
unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
/*
* Traverse from the highest to the lowest affinity level. When the
* lowest affinity level is hit, all the locks are acquired. State
* management is done immediately followed by cpu, cluster ...
* ..target_afflvl specific actions as this function unwinds back.
* Do what is needed to power off this CPU and possibly higher power
* levels if it is able to do so. Upon success, enter the final wfi
* which will power down this CPU.
*/
rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);
rc = psci_do_cpu_off(target_pwrlvl);
/*
* The only error cpu_off can return is E_DENIED. So check if that's
......@@ -246,41 +206,27 @@ int psci_cpu_off(void)
return rc;
}
int psci_affinity_info(unsigned long target_affinity,
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level)
{
int rc = PSCI_E_INVALID_PARAMS;
unsigned int aff_state;
aff_map_node_t *node;
if (lowest_affinity_level > PLATFORM_MAX_AFFLVL)
return rc;
node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
if (node && (node->state & PSCI_AFF_PRESENT)) {
/*
* TODO: For affinity levels higher than 0 i.e. cpu, the
* state will always be either ON or OFF. Need to investigate
* how critical it is to support ON_PENDING here.
*/
aff_state = psci_get_state(node);
unsigned int target_idx;
/* A suspended cpu is available & on for the OS */
if (aff_state == PSCI_STATE_SUSPEND) {
aff_state = PSCI_STATE_ON;
}
/* We don't support levels higher than PSCI_CPU_PWR_LVL */
if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
rc = aff_state;
}
/* Calculate the cpu index of the target */
target_idx = plat_core_pos_by_mpidr(target_affinity);
if (target_idx == -1)
return PSCI_E_INVALID_PARAMS;
return rc;
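/*
 * Note: the aff_info_state values are aligned with the return codes that
 * the PSCI spec mandates for AFFINITY_INFO (0 = ON, 1 = OFF,
 * 2 = ON_PENDING), so the state can be returned directly.
 */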
return psci_get_aff_info_state_by_idx(target_idx);
}
int psci_migrate(unsigned long target_cpu)
int psci_migrate(u_register_t target_cpu)
{
int rc;
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
rc = psci_spd_migrate_info(&resident_cpu_mpidr);
if (rc != PSCI_TOS_UP_MIG_CAP)
......@@ -295,7 +241,7 @@ int psci_migrate(unsigned long target_cpu)
return PSCI_E_NOT_PRESENT;
/* Check the validity of the specified target cpu */
rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
......@@ -309,14 +255,14 @@ int psci_migrate(unsigned long target_cpu)
int psci_migrate_info_type(void)
{
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
return psci_spd_migrate_info(&resident_cpu_mpidr);
}
long psci_migrate_info_up_cpu(void)
{
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
int rc;
/*
......@@ -332,7 +278,7 @@ long psci_migrate_info_up_cpu(void)
int psci_features(unsigned int psci_fid)
{
uint32_t local_caps = psci_caps;
unsigned int local_caps = psci_caps;
/* Check if it is a 64 bit function */
if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
......@@ -352,10 +298,9 @@ int psci_features(unsigned int psci_fid)
if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
/*
* The trusted firmware uses the original power state format
* and does not support OS Initiated Mode.
* The trusted firmware does not support OS Initiated Mode.
*/
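/*
 * Per the PSCI spec, bit[1] of the return value advertises the
 * power_state parameter format and bit[0] advertises OS initiated
 * mode support.
 */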
return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
return (FF_PSTATE << FF_PSTATE_SHIFT) |
((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
}
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -32,158 +32,52 @@
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"
typedef void (*afflvl_off_handler_t)(aff_map_node_t *node);
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is turned off.
/******************************************************************************
* Construct the psci_power_state to request power OFF at all power levels.
******************************************************************************/
static void psci_afflvl0_off(aff_map_node_t *cpu_node)
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
int lvl;
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_plat_pm_ops->affinst_off(cpu_node->level,
psci_get_phys_state(cpu_node));
for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
static void psci_afflvl1_off(aff_map_node_t *cluster_node)
{
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Arch. Management. Flush all levels of caches to PoC if
* the cluster is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
/*
* Plat. Management. Allow the platform to do its cluster
* specific bookkeeping e.g. turn off interconnect coherency,
* program the power controller etc.
*/
psci_plat_pm_ops->affinst_off(cluster_node->level,
psci_get_phys_state(cluster_node));
}
static void psci_afflvl2_off(aff_map_node_t *system_node)
{
/* Cannot go beyond this level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
/*
* Arch. Management. Flush all levels of caches to PoC if
* the system is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
/*
* Plat. Management: Allow the platform to do its bookkeeping
* at this affinity level
*/
psci_plat_pm_ops->affinst_off(system_node->level,
psci_get_phys_state(system_node));
}
static const afflvl_off_handler_t psci_afflvl_off_handlers[] = {
psci_afflvl0_off,
psci_afflvl1_off,
psci_afflvl2_off,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the off handler for the corresponding affinity
* levels
******************************************************************************/
static void psci_call_off_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl)
{
int level;
aff_map_node_t *node;
for (level = start_afflvl; level <= end_afflvl; level++) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
psci_afflvl_off_handlers[level](node);
}
}
/*******************************************************************************
/******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
* It's assumed that along with turning the cpu off, higher affinity levels will
* be turned off as far as possible. It traverses through all the affinity
* levels performing generic, architectural, platform setup and state management
* e.g. for a cluster that's to be powered off, it will call the platform
* specific code which will disable coherency at the interconnect level if the
* cpu is the last in the cluster. For a cpu it could mean programming
* the power controller etc.
*
* The state of all the relevant affinity levels is changed prior to calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is about to enter.
*
* The affinity level specific handlers are called in ascending order i.e. from
* the lowest to the highest affinity level implemented by the platform because
* to turn off affinity level X it is necessary to turn off affinity level X - 1
* first.
* It's assumed that along with turning the cpu power domain off, power
* domains at higher levels will be turned off as far as possible. It finds
* the highest level where a domain has to be powered off by traversing the
* node information and then performs generic, architectural, platform setup
* and state management required to turn OFF that power domain and domains
* below it. e.g. For a cpu that's to be powered OFF, it could mean programming
* the power controller whereas for a cluster that's to be powered off, it will
* call the platform specific code which will disable coherency at the
* interconnect level if the cpu is the last in the cluster and also
* program the power controller.
******************************************************************************/
int psci_afflvl_off(int start_afflvl,
int end_afflvl)
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
int rc;
mpidr_aff_map_nodes_t mpidr_nodes;
unsigned int max_phys_off_afflvl;
int rc, idx = plat_my_core_pos();
psci_power_state_t state_info;
/*
* This function must only be called on platforms where the
* CPU_OFF platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->affinst_off);
/*
* Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity
* levels are incorrect. Either way, this is an internal TF error,
* therefore assert.
*/
rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl,
end_afflvl,
mpidr_nodes);
assert(rc == PSCI_E_SUCCESS);
assert(psci_plat_pm_ops->pwr_domain_off);
/*
* This function acquires the lock corresponding to each affinity
* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
psci_acquire_pwr_domain_locks(end_pwrlvl,
idx);
/*
* Call the cpu off handler registered by the Secure Payload Dispatcher
......@@ -196,45 +90,45 @@ int psci_afflvl_off(int start_afflvl,
goto exit;
}
/* Construct the psci_power_state for CPU_OFF */
psci_set_power_off_state(&state_info);
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
* This function is passed the requested state info and
* it returns the negotiated state info for each power level up to
* the end level specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
mpidr_nodes,
PSCI_STATE_OFF);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
psci_do_state_coordination(end_pwrlvl, &state_info);
/* Stash the highest affinity level that will enter the OFF state. */
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
/* Perform generic, architecture and platform specific handling */
psci_call_off_handlers(mpidr_nodes,
start_afflvl,
end_afflvl);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA.
*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
psci_plat_pm_ops->pwr_domain_off(&state_info);
exit:
/*
* Release the locks corresponding to each affinity level in the
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
psci_release_pwr_domain_locks(end_pwrlvl,
idx);
/*
* Set the affinity info state to OFF. This writes directly to main
* memory as caches are disabled, so cache maintenance is required
* to ensure that later cached reads of aff_info_state return
* AFF_STATE_OFF.
*/
flush_cpu_data(psci_svc_cpu_data.aff_info_state);
psci_set_aff_info_state(AFF_STATE_OFF);
inv_cpu_data(psci_svc_cpu_data.aff_info_state);
/*
* Check if all actions needed to safely power down this cpu have
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -40,212 +40,72 @@
#include <stddef.h>
#include "psci_private.h"
typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
aff_map_node_t *node);
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
******************************************************************************/
static int cpu_on_validate_state(unsigned int psci_state)
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
if (aff_state == AFF_STATE_ON)
return PSCI_E_ALREADY_ON;
if (psci_state == PSCI_STATE_ON_PENDING)
if (aff_state == AFF_STATE_ON_PENDING)
return PSCI_E_ON_PENDING;
assert(psci_state == PSCI_STATE_OFF);
assert(aff_state == AFF_STATE_OFF);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Handler routine to turn a cpu on. It takes care of any generic, architectural
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
* This function sets the aff_info_state in the per-cpu data of the CPU
* specified by cpu_idx
******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
aff_map_node_t *cpu_node)
static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx,
aff_info_state_t aff_state)
{
unsigned long psci_entrypoint;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
set_cpu_data_by_index(cpu_idx,
psci_svc_cpu_data.aff_info_state,
aff_state);
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
* Flush aff_info_state as it will be accessed with caches turned OFF.
*/
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
cpu_node->level,
psci_get_phys_state(cpu_node));
}
/*******************************************************************************
* Handler routine to turn a cluster on. It takes care of any generic, arch.
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
aff_map_node_t *cluster_node)
{
unsigned long psci_entrypoint;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* There is no generic or arch. specific cluster
* management required
*/
/* State management: Is not required while turning a cluster on */
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
cluster_node->level,
psci_get_phys_state(cluster_node));
}
/*******************************************************************************
* Handler routine to turn a cluster of clusters on. It takes care of any
* generic, arch. or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
aff_map_node_t *system_node)
{
unsigned long psci_entrypoint;
/* Cannot go beyond affinity level 2 in this psci imp. */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* There is no generic or arch. specific system management
* required
*/
/* State management: Is not required while turning a system on */
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
system_node->level,
psci_get_phys_state(system_node));
}
/* Private data structure to make these handlers accessible through indexing */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
psci_afflvl0_on,
psci_afflvl1_on,
psci_afflvl2_on,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the on handler for the corresponding affinity
* levels
******************************************************************************/
static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
int start_afflvl,
int end_afflvl,
unsigned long target_cpu)
{
int rc = PSCI_E_INVALID_PARAMS, level;
aff_map_node_t *node;
for (level = end_afflvl; level >= start_afflvl; level--) {
node = target_cpu_nodes[level];
if (node == NULL)
continue;
/*
* TODO: In case of an error should there be a way
* of undoing what we might have setup at higher
* affinity levels.
*/
rc = psci_afflvl_on_handlers[level](target_cpu,
node);
if (rc != PSCI_E_SUCCESS)
break;
}
return rc;
flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.aff_info_state);
}
/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It traverses through all the affinity levels performing generic,
* architectural, platform setup and state management e.g. for a cpu that is
* to be powered on, it will ensure that enough information is stashed for it
* to resume execution in the non-secure security state.
*
* The state of all the relevant affinity levels is changed after calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is currently in.
* its mpidr. It performs the generic, architectural, platform setup and state
* management to power on the target cpu e.g. it will ensure that
* enough information is stashed for it to resume execution in the non-secure
* security state.
*
* The affinity level specific handlers are called in descending order i.e. from
* the highest to the lowest affinity level implemented by the platform because
* to turn on affinity level X it is necessary to turn on affinity level X + 1
* first.
* The state of all the relevant power domains is changed after calling the
* platform handler, as it can return an error.
******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
entry_point_info_t *ep,
int start_afflvl,
int end_afflvl)
int psci_cpu_on_start(u_register_t target_cpu,
entry_point_info_t *ep,
unsigned int end_pwrlvl)
{
int rc;
mpidr_aff_map_nodes_t target_cpu_nodes;
unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
/*
* This function must only be called on platforms where the
* CPU_ON platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->affinst_on &&
psci_plat_pm_ops->affinst_on_finish);
assert(psci_plat_pm_ops->pwr_domain_on &&
psci_plat_pm_ops->pwr_domain_on_finish);
/*
* Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity
* levels are incorrect.
*/
rc = psci_get_aff_map_nodes(target_cpu,
start_afflvl,
end_afflvl,
target_cpu_nodes);
assert(rc == PSCI_E_SUCCESS);
/*
* This function acquires the lock corresponding to each affinity
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
end_afflvl,
target_cpu_nodes);
/* Protect against multiple CPUs trying to turn ON the same target CPU */
psci_spin_lock_cpu(target_idx);
/*
* Generic management: Ensure that the cpu is off before it can be
* turned on.
*/
rc = cpu_on_validate_state(psci_get_state(
target_cpu_nodes[MPIDR_AFFLVL0]));
rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
if (rc != PSCI_E_SUCCESS)
goto exit;
......@@ -258,69 +118,48 @@ int psci_afflvl_on(unsigned long target_cpu,
psci_spd_pm->svc_on(target_cpu);
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
* Set the Affinity info state of the target cpu to ON_PENDING.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
target_cpu_nodes,
PSCI_STATE_ON_PENDING);
/* Perform generic, architecture and platform specific handling. */
rc = psci_call_on_handlers(target_cpu_nodes,
start_afflvl,
end_afflvl,
target_cpu);
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
/*
* Perform generic, architecture and platform specific handling.
*/
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
cm_init_context(target_cpu, ep);
cm_init_context_by_index(target_idx, ep);
else
/* Restore the state on error. */
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
target_cpu_nodes,
PSCI_STATE_OFF);
exit:
/*
* This loop releases the lock corresponding to each affinity level
* in the reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
end_afflvl,
target_cpu_nodes);
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
exit:
psci_spin_unlock_cpu(target_idx);
return rc;
}
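/*
 * Illustrative sketch, not part of this patch: the SMC-level CPU_ON
 * handler is expected to validate its arguments with the helpers
 * introduced here before invoking psci_cpu_on_start(), roughly as
 * follows (the wrapper name is hypothetical).
 */
int example_psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint,
			u_register_t context_id)
{
	int rc;
	entry_point_info_t ep;

	/* Determine whether the target cpu exists on this platform */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Validate the entry point and get the entry_point_info */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Turn on the cpu and all the power domain levels above it */
	return psci_cpu_on_start(target_cpu, &ep, PLAT_MAX_PWR_LVL);
}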
/*******************************************************************************
* The following functions finish an earlier affinity power on request. They
* are called by the common finisher routine in psci_common.c.
* The following function finishes an earlier power on request. It is
* called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up.
******************************************************************************/
static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
void psci_cpu_on_finish(unsigned int cpu_idx,
psci_power_state_t *state_info)
{
unsigned int plat_state, state;
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Ensure we have been explicitly woken up by another cpu */
state = psci_get_state(cpu_node);
assert(state == PSCI_STATE_ON_PENDING);
/*
* Plat. management: Perform the platform specific actions
* for this cpu e.g. enabling the gic or zeroing the mailbox
* register. The actual state of this cpu has already been
* changed.
*/
/* Get the physical state of this cpu */
plat_state = get_phys_state(state);
psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
plat_state);
psci_plat_pm_ops->pwr_domain_on_finish(state_info);
/*
* Arch. management: Enable data cache and manage stack memory
......@@ -334,6 +173,18 @@ static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
*/
bl31_arch_setup();
/*
* Lock the CPU spin lock to make sure that the context initialization
* is done. Since the lock is only used in this function to create
* a synchronization point with cpu_on_start(), it can be released
* immediately.
*/
psci_spin_lock_cpu(cpu_idx);
psci_spin_unlock_cpu(cpu_idx);
/* Ensure we have been explicitly woken up by another cpu */
assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
/*
* Call the cpu on finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
......@@ -342,6 +193,10 @@ static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
if (psci_spd_pm && psci_spd_pm->svc_on_finish)
psci_spd_pm->svc_on_finish(0);
/* Populate the mpidr field within the cpu node array */
/* This needs to be done only once */
psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the cpu_on
......@@ -352,54 +207,3 @@ static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
}
static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
plat_state = psci_get_phys_state(cluster_node);
psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
plat_state);
}
static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
{
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
plat_state = psci_get_phys_state(system_node);
psci_plat_pm_ops->affinst_on_finish(system_node->level,
plat_state);
}
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
psci_afflvl0_on_finish,
psci_afflvl1_on_finish,
psci_afflvl2_on_finish,
};
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -34,22 +34,30 @@
#include <arch.h>
#include <bakery_lock.h>
#include <bl_common.h>
#include <cpu_data.h>
#include <psci.h>
#include <spinlock.h>
/*
* The following helper macros abstract the interface to the Bakery
* Lock API.
*/
#if USE_COHERENT_MEM
#define psci_lock_init(aff_map, idx) bakery_lock_init(&(aff_map)[(idx)].lock)
#define psci_lock_get(node) bakery_lock_get(&((node)->lock))
#define psci_lock_release(node) bakery_lock_release(&((node)->lock))
#define psci_lock_init(non_cpu_pd_node, idx) \
bakery_lock_init(&(non_cpu_pd_node)[(idx)].lock)
#define psci_lock_get(non_cpu_pd_node) \
bakery_lock_get(&((non_cpu_pd_node)->lock))
#define psci_lock_release(non_cpu_pd_node) \
bakery_lock_release(&((non_cpu_pd_node)->lock))
#else
#define psci_lock_init(aff_map, idx) ((aff_map)[(idx)].aff_map_index = (idx))
#define psci_lock_get(node) bakery_lock_get((node)->aff_map_index, \
CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_release(node) bakery_lock_release((node)->aff_map_index,\
CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_init(non_cpu_pd_node, idx) \
((non_cpu_pd_node)[(idx)].lock_index = (idx))
#define psci_lock_get(non_cpu_pd_node) \
bakery_lock_get((non_cpu_pd_node)->lock_index, \
CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_release(non_cpu_pd_node) \
bakery_lock_release((non_cpu_pd_node)->lock_index, \
CPU_DATA_PSCI_LOCK_OFFSET)
#endif
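/*
 * Illustrative sketch, not part of this patch: the lock helpers above are
 * used by walking the parent node indices of a CPU from level 1 up to the
 * requested end level and taking each domain's lock in ascending level
 * order, along these lines:
 *
 *	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
 *	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++)
 *		psci_lock_get(&psci_non_cpu_pd_nodes[parent_nodes[level - 1]]);
 *
 * Release happens in the reverse (descending) order so that lock ordering
 * is consistent across CPUs.
 */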
/*
......@@ -72,39 +80,99 @@
define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64))
/*
* Helper macros to get/set the fields of PSCI per-cpu data.
*/
#define psci_set_aff_info_state(aff_state) \
set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
#define psci_get_aff_info_state() \
get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_get_suspend_pwrlvl() \
get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \
set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
#define psci_set_cpu_local_state(state) \
set_cpu_data(psci_svc_cpu_data.local_state, state)
#define psci_get_cpu_local_state() \
get_cpu_data(psci_svc_cpu_data.local_state)
#define psci_get_cpu_local_state_by_idx(idx) \
get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
/*
* Helper macros for the CPU level spinlocks
*/
#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
/* Helper macro to identify a CPU standby request in PSCI Suspend call */
#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
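/*
 * For example, a suspend request that is not a power down
 * (is_power_down_state == 0) and whose target retention level is the CPU
 * itself (retn_lvl == 0) is a plain CPU standby request, which lets
 * psci_cpu_suspend() take the fast standby path.
 */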
/*******************************************************************************
* The following two data structures hold the topology tree which in turn tracks
* the state of all the affinity instances supported by the platform.
* The following two data structures implement the power domain tree. The tree
* is used to track the state of all the nodes i.e. power domain instances
* described by the platform. The tree consists of nodes that describe CPU power
* domains i.e. leaf nodes and all other power domains which are parents of a
* CPU power domain i.e. non-leaf nodes.
******************************************************************************/
typedef struct aff_map_node {
unsigned long mpidr;
unsigned char ref_count;
unsigned char state;
typedef struct non_cpu_pwr_domain_node {
/*
* Index of the first CPU power domain node at level 0 which has this node
* as its parent.
*/
unsigned int cpu_start_idx;
/*
* Number of CPU power domains which are siblings of the domain indexed
* by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
* -> cpu_start_idx + ncpus' have this node as their parent.
*/
unsigned int ncpus;
/*
* Index of the parent power domain node.
* TODO: Figure out whether using a pointer is more efficient.
*/
unsigned int parent_node;
plat_local_state_t local_state;
unsigned char level;
#if USE_COHERENT_MEM
bakery_lock_t lock;
#else
/* For indexing the bakery_info array in per CPU data */
unsigned char aff_map_index;
unsigned char lock_index;
#endif
} aff_map_node_t;
typedef struct aff_limits_node {
int min;
int max;
} aff_limits_node_t;
typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]);
typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *);
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
u_register_t mpidr;
/*
* Index of the parent power domain node.
* TODO: Figure out whether using a pointer is more efficient.
*/
unsigned int parent_node;
/*
* A CPU power domain does not require state coordination like its
* parent power domains. Hence this node does not include a bakery
* lock. A spinlock is required by the CPU_ON handler to prevent a race
* when multiple CPUs try to turn ON the same target CPU.
*/
spinlock_t cpu_lock;
} cpu_pd_node_t;
/*******************************************************************************
* Data prototypes
******************************************************************************/
extern const plat_pm_ops_t *psci_plat_pm_ops;
extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS];
extern aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
extern uint32_t psci_caps;
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;
/*******************************************************************************
* SPD's power management hooks registered with PSCI
......@@ -115,62 +183,54 @@ extern const spd_pm_ops_t *psci_spd_pm;
* Function prototypes
******************************************************************************/
/* Private exported functions from psci_common.c */
unsigned short psci_get_state(aff_map_node_t *node);
unsigned short psci_get_phys_state(aff_map_node_t *node);
void psci_set_state(aff_map_node_t *node, unsigned short state);
unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
int psci_validate_mpidr(unsigned long, int);
int get_power_on_target_afflvl(void);
void psci_afflvl_power_on_finish(int,
int,
afflvl_power_on_finisher_t *);
int psci_get_ns_ep_info(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id);
int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
uint32_t end_afflvl,
aff_map_node_t *mpidr_nodes[],
uint32_t state);
void psci_acquire_afflvl_locks(int start_afflvl,
int end_afflvl,
aff_map_node_t *mpidr_nodes[]);
void psci_release_afflvl_locks(int start_afflvl,
int end_afflvl,
mpidr_aff_map_nodes_t mpidr_nodes);
void psci_print_affinity_map(void);
void psci_set_max_phys_off_afflvl(uint32_t afflvl);
uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
uint32_t end_afflvl,
aff_map_node_t *mpidr_nodes[]);
int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_power_up_finish(void);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int end_lvl,
unsigned int node_index[]);
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info);
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state_req);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(uint64_t *mpidr);
int psci_spd_migrate_info(u_register_t *mpidr);
/* Private exported functions from psci_setup.c */
int psci_get_aff_map_nodes(unsigned long mpidr,
int start_afflvl,
int end_afflvl,
aff_map_node_t *mpidr_nodes[]);
aff_map_node_t *psci_get_aff_map_node(unsigned long, int);
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(unsigned long target_cpu,
entry_point_info_t *ep,
unsigned int end_pwrlvl);
/* Private exported functions from psci_affinity_on.c */
int psci_afflvl_on(unsigned long target_cpu,
entry_point_info_t *ep,
int start_afflvl,
int end_afflvl);
void psci_cpu_on_finish(unsigned int cpu_idx,
psci_power_state_t *state_info);
/* Private exported functions from psci_affinity_off.c */
int psci_afflvl_off(int, int);
/* Private exported functions from psci_cpu_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);
/* Private exported functions from psci_affinity_suspend.c */
void psci_afflvl_suspend(entry_point_info_t *ep,
int start_afflvl,
int end_afflvl);
/* Private exported functions from psci_pwrlvl_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state_req);
unsigned int psci_afflvl_suspend_finish(int, int);
void psci_set_suspend_power_state(unsigned int power_state);
void psci_cpu_suspend_finish(unsigned int cpu_idx,
psci_power_state_t *state_info);
/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -42,351 +42,225 @@
* Per cpu non-secure contexts used to program the architectural state prior
* to return to the normal world.
* TODO: Use the memory allocator to set aside memory for the contexts instead
* of relying on platform defined constants. Using PSCI_NUM_AFFS will be an
* overkill.
* of relying on platform defined constants.
******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
/*******************************************************************************
* In a system, a certain number of affinity instances are present at an
* affinity level. The cumulative number of instances across all levels are
* stored in 'psci_aff_map'. The topology tree has been flattened into this
* array. To retrieve nodes, information about the extents of each affinity
* level i.e. start index and end index needs to be present. 'psci_aff_limits'
* stores this information.
******************************************************************************/
aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
/******************************************************************************
* Define the psci capability variable.
*****************************************************************************/
uint32_t psci_caps;
unsigned int psci_caps;
/*******************************************************************************
* Routines for retrieving the node corresponding to an affinity level instance
* in the mpidr. The first one uses binary search to find the node corresponding
* to the mpidr (key) at a particular affinity level. The second routine decides
* extents of the binary search at each affinity level.
* Function which initializes the 'psci_non_cpu_pd_nodes' or the
* 'psci_cpu_pd_nodes' corresponding to the power level.
******************************************************************************/
static int psci_aff_map_get_idx(unsigned long key,
int min_idx,
int max_idx)
static void psci_init_pwr_domain_node(unsigned int node_idx,
unsigned int parent_idx,
unsigned int level)
{
int mid;
if (level > PSCI_CPU_PWR_LVL) {
psci_non_cpu_pd_nodes[node_idx].level = level;
psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
psci_non_cpu_pd_nodes[node_idx].local_state =
PLAT_MAX_OFF_STATE;
} else {
psci_cpu_data_t *svc_cpu_data;
/*
* Terminating condition: If the max and min indices have crossed paths
* during the binary search then the key has not been found.
*/
if (max_idx < min_idx)
return PSCI_E_INVALID_PARAMS;
psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
/*
* Make sure we are within array limits.
*/
assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS);
/* Initialize with an invalid mpidr */
psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
/*
* Bisect the array around 'mid' and then recurse into the array chunk
* where the key is likely to be found. The mpidrs in each node in the
* 'psci_aff_map' for a given affinity level are stored in an ascending
* order which makes the binary search possible.
*/
mid = min_idx + ((max_idx - min_idx) >> 1); /* Divide by 2 */
if (psci_aff_map[mid].mpidr > key)
return psci_aff_map_get_idx(key, min_idx, mid - 1);
else if (psci_aff_map[mid].mpidr < key)
return psci_aff_map_get_idx(key, mid + 1, max_idx);
else
return mid;
}
svc_cpu_data =
&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
{
int rc;
/* Set the Affinity Info for the cores as OFF */
svc_cpu_data->aff_info_state = AFF_STATE_OFF;
if (aff_lvl > PLATFORM_MAX_AFFLVL)
return NULL;
/* Invalidate the suspend level for the cpu */
svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
/* Right shift the mpidr to the required affinity level */
mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
/* Set the power state to OFF state */
svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
rc = psci_aff_map_get_idx(mpidr,
psci_aff_limits[aff_lvl].min,
psci_aff_limits[aff_lvl].max);
if (rc >= 0)
return &psci_aff_map[rc];
else
return NULL;
flush_dcache_range((uintptr_t)svc_cpu_data,
sizeof(*svc_cpu_data));
cm_set_context_by_index(node_idx,
(void *) &psci_ns_context[node_idx],
NON_SECURE);
}
}
/*******************************************************************************
* This function populates an array with nodes corresponding to a given range of
* affinity levels in an mpidr. It returns successfully only when the affinity
* levels are correct, the mpidr is valid i.e. no affinity level is absent from
* the topology tree & the affinity instance at level 0 is not absent.
******************************************************************************/
int psci_get_aff_map_nodes(unsigned long mpidr,
int start_afflvl,
int end_afflvl,
aff_map_node_t *mpidr_nodes[])
* This function updates the cpu_start_idx and ncpus fields for each node in
* psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each
* of the CPUs and checking whether they match with the parent of the
* previous CPU. The basic assumption for this to work is that children of
* the same parent are allocated adjacent indices. The platform should
* ensure this through proper
* mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
* plat_my_core_pos() APIs.
*******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
int rc = PSCI_E_INVALID_PARAMS, level;
aff_map_node_t *node;
rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
if (rc != PSCI_E_SUCCESS)
return rc;
for (level = start_afflvl; level <= end_afflvl; level++) {
/*
* Grab the node for each affinity level. No affinity level
* can be missing as that would mean that the topology tree
* is corrupted.
*/
node = psci_get_aff_map_node(mpidr, level);
if (node == NULL) {
rc = PSCI_E_INVALID_PARAMS;
break;
}
/*
* Skip absent affinity levels unless it's affinity level 0.
* An absent cpu means that the mpidr is invalid. Save the
* pointer to the node for the present affinity level
*/
if (!(node->state & PSCI_AFF_PRESENT)) {
if (level == MPIDR_AFFLVL0) {
rc = PSCI_E_INVALID_PARAMS;
break;
int j;
unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
psci_get_parent_pwr_domain_nodes(cpu_idx,
PLAT_MAX_PWR_LVL,
temp_index);
for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
if (temp_index[j] != nodes_idx[j]) {
nodes_idx[j] = temp_index[j];
psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
= cpu_idx;
}
mpidr_nodes[level] = NULL;
} else
mpidr_nodes[level] = node;
psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
}
}
return rc;
}
/*******************************************************************************
* Function which initializes the 'aff_map_node' corresponding to an affinity
* level instance. Each node has a unique mpidr, level and bakery lock. The data
* field is opaque and holds affinity level specific data e.g. for affinity
* level 0 it contains the index into arrays that hold the secure/non-secure
* state for a cpu that's been turned on/off
* Core routine to populate the power domain tree. The tree descriptor passed by
* the platform is populated breadth-first and the first entry in the map
* informs the number of root power domains. The parent nodes of the root nodes
* will point to an invalid entry (-1).
******************************************************************************/
static void psci_init_aff_map_node(unsigned long mpidr,
int level,
unsigned int idx)
static void populate_power_domain_tree(const unsigned char *topology)
{
unsigned char state;
uint32_t linear_id;
psci_aff_map[idx].mpidr = mpidr;
psci_aff_map[idx].level = level;
psci_lock_init(psci_aff_map, idx);
unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
unsigned int node_index = 0, parent_node_index = 0, num_children;
int level = PLAT_MAX_PWR_LVL;
/*
* If an affinity instance is present then mark it as OFF to begin with.
* For each level the inputs are:
* - number of nodes at this level in plat_array i.e. num_nodes_at_lvl.
* This is the sum of values of nodes at the parent level.
* - Index of first entry at this level in the plat_array i.e.
* parent_node_index.
* - Index of first free entry in psci_non_cpu_pd_nodes[] or
* psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
*/
state = plat_get_aff_state(level, mpidr);
psci_aff_map[idx].state = state;
if (level == MPIDR_AFFLVL0) {
/*
* Mark the cpu as OFF. Higher affinity level reference counts
* have already been memset to 0
*/
if (state & PSCI_AFF_PRESENT)
psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
while (level >= PSCI_CPU_PWR_LVL) {
num_nodes_at_next_lvl = 0;
/*
* Associate a non-secure context with this affinity
* instance through the context management library.
* For each entry (parent node) at this level in the plat_array:
* - Find the number of children
* - Allocate a node in a power domain array for each child
* - Set the parent of the child to the parent_node_index - 1
* - Increment parent_node_index to point to the next parent
* - Accumulate the number of children at next level.
*/
linear_id = platform_get_core_pos(mpidr);
assert(linear_id < PLATFORM_CORE_COUNT);
/* Invalidate the suspend context for the node */
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.power_state,
PSCI_INVALID_DATA);
/*
* There is no state associated with the current execution
* context so ensure that any reads of the highest affinity
* level in a powered down state return PSCI_INVALID_DATA.
*/
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.max_phys_off_afflvl,
PSCI_INVALID_DATA);
for (i = 0; i < num_nodes_at_lvl; i++) {
assert(parent_node_index <=
PSCI_NUM_NON_CPU_PWR_DOMAINS);
num_children = topology[parent_node_index];
for (j = node_index;
j < node_index + num_children; j++)
psci_init_pwr_domain_node(j,
parent_node_index - 1,
level);
node_index = j;
num_nodes_at_next_lvl += num_children;
parent_node_index++;
}
flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
num_nodes_at_lvl = num_nodes_at_next_lvl;
level--;
cm_set_context_by_mpidr(mpidr,
(void *) &psci_ns_context[linear_id],
NON_SECURE);
/* Reset the index for the cpu power domain array */
if (level == PSCI_CPU_PWR_LVL)
node_index = 0;
}
return;
}
/* Validate the sanity of the array exported by the platform */
assert(j == PLATFORM_CORE_COUNT);
/*******************************************************************************
* Core routine used by the Breadth-First-Search algorithm to populate the
* affinity tree. Each level in the tree corresponds to an affinity level. This
* routine's aim is to traverse to the target affinity level and populate nodes
* in the 'psci_aff_map' for all the siblings at that level. It uses the current
* affinity level to keep track of how many levels from the root of the tree
* have been traversed. If the current affinity level != target affinity level,
* then the platform is asked to return the number of children that each
* affinity instance has at the current affinity level. Traversal is then done
* for each child at the next lower level i.e. current affinity level - 1.
*
* CAUTION: This routine assumes that affinity instance ids are allocated in a
* monotonically increasing manner at each affinity level in a mpidr starting
* from 0. If the platform breaks this assumption then this code will have to
* be reworked accordingly.
******************************************************************************/
static unsigned int psci_init_aff_map(unsigned long mpidr,
unsigned int affmap_idx,
int cur_afflvl,
int tgt_afflvl)
{
unsigned int ctr, aff_count;
assert(cur_afflvl >= tgt_afflvl);
/*
* Find the number of siblings at the current affinity level &
* assert if there are none because then we have been invoked with
* an invalid mpidr.
*/
aff_count = plat_get_aff_count(cur_afflvl, mpidr);
assert(aff_count);
if (tgt_afflvl < cur_afflvl) {
for (ctr = 0; ctr < aff_count; ctr++) {
mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
affmap_idx = psci_init_aff_map(mpidr,
affmap_idx,
cur_afflvl - 1,
tgt_afflvl);
}
} else {
for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
}
/* affmap_idx is 1 greater than the max index of cur_afflvl */
psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
}
return affmap_idx;
#if !USE_COHERENT_MEM
/* Flush the non CPU power domain data to memory */
flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
sizeof(psci_non_cpu_pd_nodes));
#endif
}
/*******************************************************************************
* This function initializes the topology tree by querying the platform. To do
* so, it's helper routines implement a Breadth-First-Search. At each affinity
* level the platform conveys the number of affinity instances that exist i.e.
* the affinity count. The algorithm populates the psci_aff_map recursively
* using this information. On a platform that implements two clusters of 4 cpus
* each, the populated aff_map_array would look like this:
* This function initializes the power domain topology tree by querying the
* platform. The power domain nodes higher than the CPU are populated in the
* array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
* psci_cpu_pd_nodes[]. The platform exports its static topology map through the
* plat_get_power_domain_tree_desc() API. The algorithm populates the
* psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
* topology map. On a platform that implements two clusters of 2 cpus each, and
* supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
* like this:
*
* <- cpus cluster0 -><- cpus cluster1 ->
* ---------------------------------------------------
* | 0 | 1 | 0 | 1 | 2 | 3 | 0 | 1 | 2 | 3 |
* | system node | cluster 0 node | cluster 1 node |
* ---------------------------------------------------
* ^ ^
* cluster __| cpu __|
* limit limit
*
* The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
* within cluster 0. The last 4 entries are of cpus within cluster 1.
* The 'psci_aff_limits' array contains the max & min index of each affinity
* level within the 'psci_aff_map' array. This allows restricting search of a
* node at an affinity level between the indices in the limits array.
* And the populated psci_cpu_pd_nodes would look like this:
* <- cpus cluster0 -><- cpus cluster1 ->
* ------------------------------------------------
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
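/*
 * Illustrative sketch, not part of this patch: for the example topology
 * above (one system domain containing two clusters of 2 cpus each), the
 * breadth-first descriptor returned by plat_get_power_domain_tree_desc()
 * would be the following array of child counts.
 */
static const unsigned char example_power_domain_tree_desc[] = {
	1,	/* Number of root (system level) power domains */
	2,	/* Number of children of the system node i.e. clusters */
	2,	/* Number of cpu children of cluster 0 */
	2	/* Number of cpu children of cluster 1 */
};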
int32_t psci_setup(void)
int psci_setup(void)
{
unsigned long mpidr = read_mpidr();
int afflvl, affmap_idx, max_afflvl;
aff_map_node_t *node;
const unsigned char *topology_tree;
psci_plat_pm_ops = NULL;
/* Query the topology map from the platform */
topology_tree = plat_get_power_domain_tree_desc();
/* Find out the maximum affinity level that the platform implements */
max_afflvl = PLATFORM_MAX_AFFLVL;
assert(max_afflvl <= MPIDR_MAX_AFFLVL);
/* Populate the power domain arrays using the platform topology map */
populate_power_domain_tree(topology_tree);
/*
* This call traverses the topology tree with help from the platform and
* populates the affinity map using a breadth-first-search recursively.
* We assume that the platform allocates affinity instance ids from 0
* onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
*/
affmap_idx = 0;
for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
affmap_idx = psci_init_aff_map(FIRST_MPIDR,
affmap_idx,
max_afflvl,
afflvl);
}
/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
psci_update_pwrlvl_limits();
/* Populate the mpidr field of cpu node for this CPU */
psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
read_mpidr() & MPIDR_AFFINITY_MASK;
#if !USE_COHERENT_MEM
/*
* The psci_aff_map only needs flushing when it's not allocated in
* The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
* coherent memory.
*/
flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map));
flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
sizeof(psci_non_cpu_pd_nodes));
#endif
/*
* Set the bounds for the affinity counts of each level in the map. Also
* flush out the entire array so that it's visible to subsequent power
* management operations. The 'psci_aff_limits' array is allocated in
* normal memory. It will be accessed when the mmu is off e.g. after
* reset. Hence it needs to be flushed.
*/
for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
psci_aff_limits[afflvl].min =
psci_aff_limits[afflvl + 1].max + 1;
}
flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
sizeof(psci_cpu_pd_nodes));
flush_dcache_range((unsigned long) psci_aff_limits,
sizeof(psci_aff_limits));
psci_init_req_local_pwr_states();
/*
* Mark the affinity instances in our mpidr as ON. No need to lock as
* this is the primary cpu.
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
mpidr &= MPIDR_AFFINITY_MASK;
for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {
node = psci_get_aff_map_node(mpidr, afflvl);
assert(node);
/* Mark each present node as ON. */
if (node->state & PSCI_AFF_PRESENT)
psci_set_state(node, PSCI_STATE_ON);
}
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
platform_setup_pm(&psci_plat_pm_ops);
plat_setup_psci_ops((uintptr_t)psci_entrypoint,
&psci_plat_pm_ops);
assert(psci_plat_pm_ops);
/* Initialize the psci capability */
psci_caps = PSCI_GENERIC_CAP;
if (psci_plat_pm_ops->affinst_off)
if (psci_plat_pm_ops->pwr_domain_off)
psci_caps |= define_psci_cap(PSCI_CPU_OFF);
if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish)
if (psci_plat_pm_ops->pwr_domain_on &&
psci_plat_pm_ops->pwr_domain_on_finish)
psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
if (psci_plat_pm_ops->affinst_suspend &&
psci_plat_pm_ops->affinst_suspend_finish) {
if (psci_plat_pm_ops->pwr_domain_suspend &&
psci_plat_pm_ops->pwr_domain_suspend_finish) {
psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
if (psci_plat_pm_ops->get_sys_suspend_power_state)
psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -41,247 +41,113 @@
#include <stddef.h>
#include "psci_private.h"
typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node);
/*******************************************************************************
* This function saves the power state parameter passed in the current PSCI
* cpu_suspend call in the per-cpu data array.
******************************************************************************/
void psci_set_suspend_power_state(unsigned int power_state)
{
set_cpu_data(psci_svc_cpu_data.power_state, power_state);
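/*
 * Flush so the value survives the power down and can be read with the
 * caches turned off on resume.
 */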
flush_cpu_data(psci_svc_cpu_data.power_state);
}
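/*
 * Note: the flush above is needed because the saved power state is consulted
 * on the resume path, which may run before the MMU and data cache have been
 * re-enabled, so the value must reach main memory rather than sit in a dirty
 * cache line.
 */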
/*******************************************************************************
 * This function gets the affinity level up to which the current cpu could be
* powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
* power state is invalid.
* This function does generic and platform specific operations after a wake-up
* from standby/retention states at multiple power levels.
******************************************************************************/
int psci_get_suspend_afflvl(void)
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
psci_power_state_t *state_info,
unsigned int end_pwrlvl)
{
unsigned int power_state;
power_state = get_cpu_data(psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_afflvl(power_state));
}
/*******************************************************************************
* This function gets the state id of the current cpu from the power state
* parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
* power state saved is invalid.
******************************************************************************/
int psci_get_suspend_stateid(void)
{
unsigned int power_state;
power_state = get_cpu_data(psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_id(power_state));
}
/*******************************************************************************
* This function gets the state id of the cpu specified by the 'mpidr' parameter
* from the power state parameter saved in the per-cpu data array. Returns
* PSCI_INVALID_DATA if the power state saved is invalid.
******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
unsigned int power_state;
power_state = get_cpu_data_by_mpidr(mpidr,
psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_id(power_state));
}
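/*
 * The getters above decode fields of the PSCI power_state parameter. A sketch
 * of the decoding assumed here, following the PSCI 0.2 layout (StateID in
 * bits[15:0], StateType in bit[16], target affinity level in bits[25:24]);
 * the authoritative masks and shifts live in psci.h:
 */
#define PSTATE_ID_SHIFT			0
#define PSTATE_AFF_LVL_SHIFT		24
#define PSTATE_ID_MASK			0xffff
#define PSTATE_AFF_LVL_MASK		0x3

#define psci_get_pstate_id(pstate)	(((pstate) >> PSTATE_ID_SHIFT) & \
					 PSTATE_ID_MASK)
#define psci_get_pstate_afflvl(pstate)	(((pstate) >> PSTATE_AFF_LVL_SHIFT) & \
					 PSTATE_AFF_LVL_MASK)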
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is about to be suspended.
******************************************************************************/
static void psci_afflvl0_suspend(aff_map_node_t *cpu_node)
{
unsigned long psci_entrypoint;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_acquire_pwr_domain_locks(end_pwrlvl,
cpu_idx);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
* Plat. management: Allow the platform to do operations
* on waking up from retention.
*/
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
cpu_node->level,
psci_get_phys_state(cpu_node));
}
static void psci_afflvl1_suspend(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
/*
* Arch. management: Flush all levels of caches to PoC if the
* cluster is to be shutdown.
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
psci_set_pwr_domains_to_run(end_pwrlvl);
/*
* Plat. Management. Allow the platform to do its cluster specific
 * bookkeeping e.g. turn off interconnect coherency, program the power
* controller etc. Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a platform might
* do. Also it allows us to keep the platform handler prototype the
* same.
*/
plat_state = psci_get_phys_state(cluster_node);
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
cluster_node->level,
plat_state);
psci_release_pwr_domain_locks(end_pwrlvl,
cpu_idx);
}
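/*
 * A sketch of psci_set_pwr_domains_to_run(), called above, assuming the
 * helpers psci_set_req_local_pwr_state(), psci_set_aff_info_state() and
 * psci_set_cpu_local_state() exist with these shapes in the new power-domain
 * API. Illustrative only; the real routine is also expected to reset the
 * target state of each ancestor power domain node.
 */
static void psci_set_pwr_domains_to_run_sketch(unsigned int end_pwrlvl)
{
	unsigned int cpu_idx = plat_my_core_pos();
	unsigned int lvl;

	/* This CPU now requests only the RUN state at each ancestor level */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++)
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     PSCI_LOCAL_STATE_RUN);

	/* Mark this CPU's own power domain as ON and running */
	psci_set_aff_info_state(AFF_STATE_ON);
	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
}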
static void psci_afflvl2_suspend(aff_map_node_t *system_node)
/*******************************************************************************
* This function does generic and platform specific suspend to power down
* operations.
******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
entry_point_info_t *ep,
psci_power_state_t *state_info)
{
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Cannot go beyond this */
assert(system_node->level == MPIDR_AFFLVL2);
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
* Flush the target power level as it will be accessed on power up with
 * the data cache disabled.
*/
plat_state = psci_get_phys_state(system_node);
flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
/*
* Arch. management: Flush all levels of caches to PoC if the
* system is to be shutdown.
* Call the cpu suspend handler registered by the Secure Payload
* Dispatcher to let it do any book-keeping. If the handler encounters an
* error, it's expected to assert within
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
if (psci_spd_pm && psci_spd_pm->svc_suspend)
psci_spd_pm->svc_suspend(0);
/*
 * Plat. Management : Allow the platform to do its bookkeeping
* at this affinity level
* Store the re-entry information for the non-secure world.
*/
cm_init_my_context(ep);
/*
* Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a
* platform might do. Also it allows us to keep the
* platform handler prototype the same.
* Arch. management. Perform the necessary steps to flush all
 * cpu caches. Currently we assume that the power level corresponds to
 * the cache level.
* TODO : Introduce a mechanism to query the cache level to flush
 * and the cpu-ops power-down handler to invoke from the platform.
*/
plat_state = psci_get_phys_state(system_node);
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
system_node->level,
plat_state);
}
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
psci_afflvl0_suspend,
psci_afflvl1_suspend,
psci_afflvl2_suspend,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the suspend handler for the corresponding affinity
* levels
******************************************************************************/
static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl)
{
int level;
aff_map_node_t *node;
for (level = start_afflvl; level <= end_afflvl; level++) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
psci_afflvl_suspend_handlers[level](node);
}
psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(state_info));
}
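/*
 * psci_find_max_off_lvl() above is assumed to scan the coordinated target
 * states from the highest power level downwards and report the deepest level
 * that will actually be powered off, so cache maintenance is performed only
 * up to that level. A sketch under that assumption:
 */
static unsigned int psci_find_max_off_lvl_sketch(
		const psci_power_state_t *state_info)
{
	int lvl;

	for (lvl = PLAT_MAX_PWR_LVL; lvl >= PSCI_CPU_PWR_LVL; lvl--)
		if (is_local_state_off(state_info->pwr_domain_state[lvl]))
			return (unsigned int)lvl;

	return PSCI_INVALID_PWR_LVL;
}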
/*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with turning the cpu off, higher affinity levels
* until the target affinity level will be turned off as well. It traverses
* through all the affinity levels performing generic, architectural, platform
* setup and state management e.g. for a cluster that's to be suspended, it will
* call the platform specific code which will disable coherency at the
* interconnect level if the cpu is the last in the cluster. For a cpu it could
* mean programming the power controller etc.
*
* The state of all the relevant affinity levels is changed prior to calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is about to enter.
*
* The affinity level specific handlers are called in ascending order i.e. from
* the lowest to the highest affinity level implemented by the platform because
 * to turn off affinity level X it is necessary to turn off affinity level X - 1
* first.
* It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each
 * power domain level up to the target power domain level. It then performs
* generic, architectural, platform setup and state management required to
* suspend that power domain level and power domain levels below it.
* e.g. For a cpu that's to be suspended, it could mean programming the
* power controller whereas for a cluster that's to be suspended, it will call
* the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster and also program the power
* controller.
*
* All the required parameter checks are performed at the beginning and after
* the state transition has been done, no further error is expected and it
* is not possible to undo any of the actions taken beyond that point.
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
void psci_afflvl_suspend(entry_point_info_t *ep,
int start_afflvl,
int end_afflvl)
void psci_cpu_suspend_start(entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
int skip_wfi = 0;
mpidr_aff_map_nodes_t mpidr_nodes;
unsigned int max_phys_off_afflvl;
unsigned int idx = plat_my_core_pos();
/*
* This function must only be called on platforms where the
* CPU_SUSPEND platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->affinst_suspend &&
psci_plat_pm_ops->affinst_suspend_finish);
assert(psci_plat_pm_ops->pwr_domain_suspend &&
psci_plat_pm_ops->pwr_domain_suspend_finish);
/*
* Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity
 * levels are incorrect. Either way, this is an internal TF error
* therefore assert.
*/
if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
assert(0);
/*
* This function acquires the lock corresponding to each affinity
* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
 * is snapshotted and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
psci_acquire_pwr_domain_locks(end_pwrlvl,
idx);
/*
* We check if there are any pending interrupts after the delay
......@@ -294,75 +160,64 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
}
/*
* Call the cpu suspend handler registered by the Secure Payload
 * Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
* This function is passed the requested state info and
 * it returns the negotiated state info for each power level up to
* the end level specified.
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend)
psci_spd_pm->svc_suspend(0);
psci_do_state_coordination(end_pwrlvl, state_info);
if (is_power_down_state)
psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
mpidr_nodes,
PSCI_STATE_SUSPEND);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
/* Stash the highest affinity level that will be turned off */
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
psci_plat_pm_ops->pwr_domain_suspend(state_info);
exit:
/*
* Store the re-entry information for the non-secure world.
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
cm_init_context(read_mpidr_el1(), ep);
psci_release_pwr_domain_locks(end_pwrlvl,
idx);
if (skip_wfi)
return;
/* Perform generic, architecture and platform specific handling */
psci_call_suspend_handlers(mpidr_nodes,
start_afflvl,
end_afflvl);
if (is_power_down_state)
psci_power_down_wfi();
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA.
* We will reach here if only retention/standby states have been
* requested at multiple power levels. This means that the cpu
* context will be preserved.
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
wfi();
exit:
/*
* Release the locks corresponding to each affinity level in the
* reverse order to which they were acquired.
* After we wake up from context retaining suspend, call the
* context retaining suspend finisher.
*/
psci_release_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
if (!skip_wfi)
psci_power_down_wfi();
psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
}
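/*
 * psci_do_state_coordination(), invoked in the function above, negotiates the
 * target state of each power domain level. A sketch of the assumed default
 * rule: plat_local_state_t values grow with state depth (0 == RUN), so the
 * only state acceptable to every CPU sharing a domain is the shallowest, i.e.
 * the minimum, of their requested states. Platforms may apply a different
 * policy; this is illustrative only.
 */
static plat_local_state_t psci_coordinate_states_sketch(
		const plat_local_state_t *req_states, unsigned int ncpus)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE;
	unsigned int i;

	/* The shallowest request wins: any running CPU pins the domain */
	for (i = 0; i < ncpus; i++)
		if (req_states[i] < target)
			target = req_states[i];

	return target;
}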
/*******************************************************************************
* The following functions finish an earlier affinity suspend request. They
* are called by the common finisher routine in psci_common.c.
* The following functions finish an earlier suspend request. They
* are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
******************************************************************************/
static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
void psci_cpu_suspend_finish(unsigned int cpu_idx,
psci_power_state_t *state_info)
{
unsigned int plat_state, state;
int32_t suspend_level;
uint64_t counter_freq;
assert(cpu_node->level == MPIDR_AFFLVL0);
unsigned long long counter_freq;
unsigned int suspend_level;
/* Ensure we have been woken up from a suspended state */
state = psci_get_state(cpu_node);
assert(state == PSCI_STATE_SUSPEND);
assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
/*
* Plat. management: Perform the platform specific actions
......@@ -371,11 +226,7 @@ static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
* wrong then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of this cpu */
plat_state = get_phys_state(state);
psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level,
plat_state);
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
/*
* Arch. management: Enable the data cache, manage stack memory and
......@@ -394,13 +245,13 @@ static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend) {
suspend_level = psci_get_suspend_afflvl();
assert (suspend_level != PSCI_INVALID_DATA);
suspend_level = psci_get_suspend_pwrlvl();
assert (suspend_level != PSCI_INVALID_PWR_LVL);
psci_spd_pm->svc_suspend_finish(suspend_level);
}
/* Invalidate the suspend context for the node */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
/*
* Generic management: Now we just need to retrieve the
......@@ -412,58 +263,3 @@ static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
}
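/*
 * The is_local_state_off() check in the assert above classifies a
 * plat_local_state_t value by range. A sketch of the assumed classification,
 * with RUN == 0, retention states in (0, PLAT_MAX_RET_STATE] and power-down
 * states in (PLAT_MAX_RET_STATE, PLAT_MAX_OFF_STATE]:
 */
#define is_local_state_run(state)	((state) == PSCI_LOCAL_STATE_RUN)
#define is_local_state_retn(state)	(((state) > PSCI_LOCAL_STATE_RUN) && \
					 ((state) <= PLAT_MAX_RET_STATE))
#define is_local_state_off(state)	(((state) > PLAT_MAX_RET_STATE) && \
					 ((state) <= PLAT_MAX_OFF_STATE))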
static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of this cpu */
plat_state = psci_get_phys_state(cluster_node);
psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level,
plat_state);
}
static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
{
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of the system */
plat_state = psci_get_phys_state(system_node);
psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
plat_state);
}
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
psci_afflvl0_suspend_finish,
psci_afflvl1_suspend_finish,
psci_afflvl2_suspend_finish,
};
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -37,7 +37,7 @@
void psci_system_off(void)
{
psci_print_affinity_map();
psci_print_power_domain_map();
assert(psci_plat_pm_ops->system_off);
......@@ -54,7 +54,7 @@ void psci_system_off(void)
void psci_system_reset(void)
{
psci_print_affinity_map();
psci_print_power_domain_map();
assert(psci_plat_pm_ops->system_reset);
......