Commit 6590ce22 authored by Soby Mathew

PSCI: Invoke PM hooks only for the highest level

This patch optimizes the invocation of the platform power management hooks for
ON, OFF and SUSPEND so that they are called only for the highest affinity
level that will be powered on or off. Earlier, the hooks were invoked for
all the intermediate levels as well.

This patch requires platform ports to migrate to the new semantics of the PM
hooks. It also removes the `state` parameter from the PM hooks, as the `afflvl`
parameter now indicates the highest affinity level for which power management
operations are required.

Change-Id: I57c87931d8a2723aeade14acc710e5b78ac41732
parent b48349eb
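For illustration, a platform port migrating to the new semantics collapses its per-level OFF handling into a single hook that acts on afflvl and every level below it. The sketch below is hypothetical: the fictional_pwrc_*() helpers are assumed port-specific code, not part of this patch, and the includes are only the ones such a hook would typically need.

#include <arch.h>		/* MPIDR_AFFLVL1 */
#include <arch_helpers.h>	/* read_mpidr_el1() */

/* Illustrative power-controller helpers, assumed to exist in the port. */
void fictional_pwrc_cpu_off(unsigned long mpidr);
void fictional_pwrc_cluster_off(unsigned long mpidr);

/*
 * Sketch of a migrated affinst_off hook: it is now called exactly once,
 * with afflvl naming the highest affinity level that will be powered
 * down, instead of once per level with an additional `state` argument.
 */
static void fictional_affinst_off(unsigned int afflvl)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Cluster-level work only when the whole cluster is going down. */
	if (afflvl >= MPIDR_AFFLVL1)
		fictional_pwrc_cluster_off(mpidr);

	/* The calling cpu is always powered off. */
	fictional_pwrc_cpu_off(mpidr);
}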
......@@ -171,8 +171,6 @@
******************************************************************************/
typedef struct psci_cpu_data {
uint32_t power_state;
uint32_t max_phys_off_afflvl; /* Highest affinity level in physically
powered off state */
#if !USE_COHERENT_MEM
bakery_info_t pcpu_bakery_info[PSCI_NUM_AFFS];
#endif
......@@ -186,15 +184,12 @@ typedef struct plat_pm_ops {
void (*affinst_standby)(unsigned int power_state);
int (*affinst_on)(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_off)(unsigned int afflvl, unsigned int state);
unsigned int afflvl);
void (*affinst_off)(unsigned int afflvl);
void (*affinst_suspend)(unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend_finish)(unsigned int afflvl,
unsigned int state);
unsigned int afflvl);
void (*affinst_on_finish)(unsigned int afflvl);
void (*affinst_suspend_finish)(unsigned int afflvl);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state);
......@@ -238,7 +233,6 @@ void psci_register_spd_pm_hook(const spd_pm_ops_t *);
int psci_get_suspend_stateid_by_mpidr(unsigned long);
int psci_get_suspend_stateid(void);
int psci_get_suspend_afflvl(void);
uint32_t psci_get_max_phys_off_afflvl(void);
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
......
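Putting the revised prototypes together, a port's ops structure would now look roughly as below. This is a sketch only: it assumes the plat_pm_ops_t typedef name that closes this struct in psci.h, and every fictional_* handler is a placeholder for the port's own implementation.

/* Placeholder handlers a hypothetical port would provide. */
static const plat_pm_ops_t fictional_pm_ops = {
	.affinst_standby	= fictional_affinst_standby,
	.affinst_on		= fictional_affinst_on,		/* (mpidr, sec_entrypoint, afflvl) */
	.affinst_off		= fictional_affinst_off,	/* (afflvl) */
	.affinst_suspend	= fictional_affinst_suspend,	/* (sec_entrypoint, afflvl) */
	.affinst_on_finish	= fictional_affinst_on_finish,	/* (afflvl) */
	.affinst_suspend_finish	= fictional_affinst_suspend_finish,	/* (afflvl) */
	.system_off		= fictional_system_off,
	.system_reset		= fictional_system_reset,
	.validate_power_state	= fictional_validate_power_state,
};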
......@@ -35,122 +35,19 @@
#include <string.h>
#include "psci_private.h"
typedef void (*afflvl_off_handler_t)(aff_map_node_t *node);
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is turned off.
******************************************************************************/
static void psci_afflvl0_off(aff_map_node_t *cpu_node)
{
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_plat_pm_ops->affinst_off(cpu_node->level,
psci_get_phys_state(cpu_node));
}
static void psci_afflvl1_off(aff_map_node_t *cluster_node)
{
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Arch. Management. Flush all levels of caches to PoC if
* the cluster is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
/*
* Plat. Management. Allow the platform to do its cluster
* specific bookkeeping e.g. turn off interconnect coherency,
* program the power controller etc.
*/
psci_plat_pm_ops->affinst_off(cluster_node->level,
psci_get_phys_state(cluster_node));
}
static void psci_afflvl2_off(aff_map_node_t *system_node)
{
/* Cannot go beyond this level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
/*
* Arch. Management. Flush all levels of caches to PoC if
* the system is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
/*
* Plat. Management: Allow the platform to do its bookkeeping
* at this affinity level
*/
psci_plat_pm_ops->affinst_off(system_node->level,
psci_get_phys_state(system_node));
}
static const afflvl_off_handler_t psci_afflvl_off_handlers[] = {
psci_afflvl0_off,
psci_afflvl1_off,
psci_afflvl2_off,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the off handler for the corresponding affinity
* levels
******************************************************************************/
static void psci_call_off_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl)
{
int level;
aff_map_node_t *node;
for (level = start_afflvl; level <= end_afflvl; level++) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
psci_afflvl_off_handlers[level](node);
}
}
/*******************************************************************************
/******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
* It's assumed that along with turning the cpu off, higher affinity levels will
* be turned off as far as possible. It traverses through all the affinity
* levels performing generic, architectural, platform setup and state management
* e.g. for a cluster that's to be powered off, it will call the platform
* specific code which will disable coherency at the interconnect level if the
* cpu is the last in the cluster. For a cpu it could mean programming the
* power controller etc.
*
* The state of all the relevant affinity levels is changed prior to calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is about to enter.
*
* The affinity level specific handlers are called in ascending order i.e. from
* the lowest to the highest affinity level implemented by the platform because
* to turn off affinity level X it is necessary to turn off affinity level X - 1
* first.
* It's assumed that along with turning the cpu off, higher affinity levels
* will be turned off as far as possible. It finds the highest level to be
* powered off by traversing the node information and then performs generic,
* architectural, platform setup and state management required to turn OFF
* that affinity level and affinity levels below it. e.g. For a cpu that's to
* be powered OFF, it could mean programming the power controller whereas for
* a cluster that's to be powered off, it will call the platform specific code
* which will disable coherency at the interconnect level if the cpu is the
* last in the cluster and also program the power controller.
******************************************************************************/
int psci_afflvl_off(int start_afflvl,
int end_afflvl)
int psci_afflvl_off(int end_afflvl)
{
int rc;
mpidr_aff_map_nodes_t mpidr_nodes;
......@@ -170,7 +67,7 @@ int psci_afflvl_off(int start_afflvl,
* therefore assert.
*/
rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl,
MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
assert(rc == PSCI_E_SUCCESS);
......@@ -180,7 +77,7 @@ int psci_afflvl_off(int start_afflvl,
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
......@@ -201,38 +98,34 @@ int psci_afflvl_off(int start_afflvl,
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes,
PSCI_STATE_OFF);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
/* Stash the highest affinity level that will enter the OFF state. */
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
/* Perform generic, architecture and platform specific handling */
psci_call_off_handlers(mpidr_nodes,
start_afflvl,
end_afflvl);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(max_phys_off_afflvl);
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA.
*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
psci_plat_pm_ops->affinst_off(max_phys_off_afflvl);
exit:
/*
* Release the locks corresponding to each affinity level in the
* reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
psci_release_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
......
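Read in order, the new psci_afflvl_off() above reduces to the short sequence below; this is only a condensed reading of the hunk, with the SPD hook checks, error paths and the exit label elided.

/*
 * Condensed flow of the new psci_afflvl_off() (sketch, error handling
 * elided):
 *
 *   psci_get_aff_map_nodes(mpidr, MPIDR_AFFLVL0, end_afflvl, nodes);
 *   psci_acquire_afflvl_locks(MPIDR_AFFLVL0, end_afflvl, nodes);
 *   psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0, end_afflvl, nodes, PSCI_STATE_OFF);
 *   max = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0, end_afflvl, nodes);
 *   psci_do_pwrdown_cache_maintenance(max);   -- arch: one cache flush pass
 *   psci_plat_pm_ops->affinst_off(max);       -- plat: called once, highest level only
 *   psci_release_afflvl_locks(MPIDR_AFFLVL0, end_afflvl, nodes);
 */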
......@@ -40,9 +40,6 @@
#include <stddef.h>
#include "psci_private.h"
typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
aff_map_node_t *node);
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
......@@ -59,158 +56,23 @@ static int cpu_on_validate_state(unsigned int psci_state)
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Handler routine to turn a cpu on. It takes care of any generic, architectural
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
aff_map_node_t *cpu_node)
{
unsigned long psci_entrypoint;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
cpu_node->level,
psci_get_phys_state(cpu_node));
}
/*******************************************************************************
* Handler routine to turn a cluster on. It takes care of any generic, arch.
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
aff_map_node_t *cluster_node)
{
unsigned long psci_entrypoint;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* There is no generic and arch. specific cluster
* management required
*/
/* State management: Is not required while turning a cluster on */
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
cluster_node->level,
psci_get_phys_state(cluster_node));
}
/*******************************************************************************
* Handler routine to turn a cluster of clusters on. It takes care of any
* generic, arch. or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
aff_map_node_t *system_node)
{
unsigned long psci_entrypoint;
/* Cannot go beyond affinity level 2 in this psci imp. */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* There is no generic and arch. specific system management
* required
*/
/* State management: Is not required while turning a system on */
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
system_node->level,
psci_get_phys_state(system_node));
}
/* Private data structure to make these handlers accessible through indexing */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
psci_afflvl0_on,
psci_afflvl1_on,
psci_afflvl2_on,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the on handler for the corresponding affinity
* levels
******************************************************************************/
static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
int start_afflvl,
int end_afflvl,
unsigned long target_cpu)
{
int rc = PSCI_E_INVALID_PARAMS, level;
aff_map_node_t *node;
for (level = end_afflvl; level >= start_afflvl; level--) {
node = target_cpu_nodes[level];
if (node == NULL)
continue;
/*
* TODO: In case of an error should there be a way
* of undoing what we might have setup at higher
* affinity levels.
*/
rc = psci_afflvl_on_handlers[level](target_cpu,
node);
if (rc != PSCI_E_SUCCESS)
break;
}
return rc;
}
/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It traverses through all the affinity levels performing generic,
* architectural, platform setup and state management e.g. for a cpu that is
* to be powered on, it will ensure that enough information is stashed for it
* to resume execution in the non-secure security state.
* its mpidr. It performs the generic, architectural, platform setup and state
* management to power on the target cpu e.g. it will ensure that
* enough information is stashed for it to resume execution in the non-secure
* security state.
*
* The state of all the relevant affinity levels is changed after calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is currently in.
*
* The affinity level specific handlers are called in descending order i.e. from
* the highest to the lowest affinity level implemented by the platform because
* to turn on affinity level X it is necessary to turn on affinity level X + 1
* first.
* platform handler as it can return an error.
******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
entry_point_info_t *ep,
int start_afflvl,
int end_afflvl)
{
int rc;
mpidr_aff_map_nodes_t target_cpu_nodes;
unsigned long psci_entrypoint;
/*
* This function must only be called on platforms where the
......@@ -226,7 +88,7 @@ int psci_afflvl_on(unsigned long target_cpu,
* levels are incorrect.
*/
rc = psci_get_aff_map_nodes(target_cpu,
start_afflvl,
MPIDR_AFFLVL0,
end_afflvl,
target_cpu_nodes);
assert(rc == PSCI_E_SUCCESS);
......@@ -236,7 +98,7 @@ int psci_afflvl_on(unsigned long target_cpu,
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
target_cpu_nodes);
......@@ -262,17 +124,25 @@ int psci_afflvl_on(unsigned long target_cpu,
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
target_cpu_nodes,
PSCI_STATE_ON_PENDING);
/* Perform generic, architecture and platform specific handling. */
rc = psci_call_on_handlers(target_cpu_nodes,
start_afflvl,
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
end_afflvl,
target_cpu);
target_cpu_nodes,
PSCI_STATE_ON_PENDING);
/*
* Perform generic, architecture and platform specific handling.
*/
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
rc = psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
MPIDR_AFFLVL0);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
if (rc == PSCI_E_SUCCESS)
......@@ -280,7 +150,7 @@ int psci_afflvl_on(unsigned long target_cpu,
cm_init_context(target_cpu, ep);
else
/* Restore the state on error. */
psci_do_afflvl_state_mgmt(start_afflvl,
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
end_afflvl,
target_cpu_nodes,
PSCI_STATE_OFF);
......@@ -289,7 +159,7 @@ exit:
* This loop releases the lock corresponding to each affinity level
* in the reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
psci_release_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
target_cpu_nodes);
......@@ -297,18 +167,15 @@ exit:
}
/*******************************************************************************
* The following functions finish an earlier affinity power on request. They
* The following function finishes an earlier affinity power on request. It is called by the common finisher routine in psci_common.c.
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
void psci_afflvl_on_finisher(aff_map_node_t *node[], int afflvl)
{
unsigned int plat_state, state;
assert(cpu_node->level == MPIDR_AFFLVL0);
assert(node[afflvl]->level == afflvl);
/* Ensure we have been explicitly woken up by another cpu */
state = psci_get_state(cpu_node);
assert(state == PSCI_STATE_ON_PENDING);
assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_ON_PENDING);
/*
* Plat. management: Perform the platform specific actions
......@@ -316,11 +183,7 @@ static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
* register. The actual state of this cpu has already been
* changed.
*/
/* Get the physical state of this cpu */
plat_state = get_phys_state(state);
psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
plat_state);
psci_plat_pm_ops->affinst_on_finish(afflvl);
/*
* Arch. management: Enable data cache and manage stack memory
......@@ -353,53 +216,3 @@ static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
dcsw_op_louis(DCCSW);
}
static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
plat_state = psci_get_phys_state(cluster_node);
psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
plat_state);
}
static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
{
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
plat_state = psci_get_phys_state(system_node);
psci_plat_pm_ops->affinst_on_finish(system_node->level,
plat_state);
}
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
psci_afflvl0_on_finish,
psci_afflvl1_on_finish,
psci_afflvl2_on_finish,
};
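With the three per-level finishers above folded into the single psci_afflvl_on_finisher(), the platform's affinst_on_finish hook is likewise invoked once. A hypothetical port might handle it as sketched below; the fictional_* helpers are illustrative and not part of this patch.

/* Illustrative restore helpers, assumed to exist in the port. */
void fictional_cci_enable_coherency(void);
void fictional_gic_cpuif_setup(void);

/*
 * Sketch of a migrated affinst_on_finish hook: afflvl is the highest
 * level that had been physically powered off, so cluster-level restore
 * work is only needed when afflvl >= MPIDR_AFFLVL1.
 */
static void fictional_affinst_on_finish(unsigned int afflvl)
{
	if (afflvl >= MPIDR_AFFLVL1)
		/* The whole cluster was off: re-enable interconnect coherency. */
		fictional_cci_enable_coherency();

	/* Per-cpu restore work, e.g. bringing up the gic cpu interface. */
	fictional_gic_cpuif_setup();
}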
......@@ -41,8 +41,6 @@
#include <stddef.h>
#include "psci_private.h"
typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node);
/*******************************************************************************
* This function saves the power state parameter passed in the current PSCI
* cpu_suspend call in the per-cpu data array.
......@@ -99,162 +97,30 @@ int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
power_state : psci_get_pstate_id(power_state));
}
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is about to be suspended.
******************************************************************************/
static void psci_afflvl0_suspend(aff_map_node_t *cpu_node)
{
unsigned long psci_entrypoint;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
cpu_node->level,
psci_get_phys_state(cpu_node));
}
static void psci_afflvl1_suspend(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Arch. management: Flush all levels of caches to PoC if the
* cluster is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
/*
* Plat. Management. Allow the platform to do its cluster specific
* bookkeeping e.g. turn off interconnect coherency, program the power
* controller etc. Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a platform might
* do. Also it allows us to keep the platform handler prototype the
* same.
*/
plat_state = psci_get_phys_state(cluster_node);
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
cluster_node->level,
plat_state);
}
static void psci_afflvl2_suspend(aff_map_node_t *system_node)
{
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Cannot go beyond this */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
plat_state = psci_get_phys_state(system_node);
/*
* Arch. management: Flush all levels of caches to PoC if the
* system is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
/*
* Plat. Management: Allow the platform to do its bookkeeping
* at this affinity level
*/
/*
* Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a
* platform might do. Also it allows us to keep the
* platform handler prototype the same.
*/
plat_state = psci_get_phys_state(system_node);
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
system_node->level,
plat_state);
}
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
psci_afflvl0_suspend,
psci_afflvl1_suspend,
psci_afflvl2_suspend,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the suspend handler for the corresponding affinity
* levels
******************************************************************************/
static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl)
{
int level;
aff_map_node_t *node;
for (level = start_afflvl; level <= end_afflvl; level++) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
psci_afflvl_suspend_handlers[level](node);
}
}
/*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with turning the cpu off, higher affinity levels
* until the target affinity level will be turned off as well. It traverses
* through all the affinity levels performing generic, architectural, platform
* setup and state management e.g. for a cluster that's to be suspended, it will
* call the platform specific code which will disable coherency at the
* interconnect level if the cpu is the last in the cluster. For a cpu it could
* mean programming the power controller etc.
*
* The state of all the relevant affinity levels is changed prior to calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is about to enter.
*
* The affinity level specific handlers are called in ascending order i.e. from
* the lowest to the highest affinity level implemented by the platform because
* to turn off affinity level X it is necessary to turn off affinity level X - 1
* first.
* It is assumed that along with suspending the cpu, higher affinity levels
* until the target affinity level will be suspended as well. It finds the
* highest level to be suspended by traversing the node information and then
* performs generic, architectural, platform setup and state management
* required to suspend that affinity level and affinity levels below it.
* e.g. For a cpu that's to be suspended, it could mean programming the
* power controller whereas for a cluster that's to be suspended, it will call
* the platform specific code which will disable coherency at the interconnect
* level if the cpu is the last in the cluster and also program the power
* controller.
*
* All the required parameter checks are performed at the beginning and after
* the state transition has been done, no further error is expected and it
* is not possible to undo any of the actions taken beyond that point.
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
void psci_afflvl_suspend(entry_point_info_t *ep,
int start_afflvl,
int end_afflvl)
{
int skip_wfi = 0;
mpidr_aff_map_nodes_t mpidr_nodes;
unsigned int max_phys_off_afflvl;
unsigned long psci_entrypoint;
/*
* This function must only be called on platforms where the
......@@ -271,7 +137,7 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
* therefore assert.
*/
if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
MPIDR_AFFLVL0, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
assert(0);
/*
......@@ -279,7 +145,7 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
......@@ -306,42 +172,45 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes,
PSCI_STATE_SUSPEND);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
/* Stash the highest affinity level that will be turned off */
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
/*
* Store the re-entry information for the non-secure world.
*/
cm_init_context(read_mpidr_el1(), ep);
/* Perform generic, architecture and platform specific handling */
psci_call_suspend_handlers(mpidr_nodes,
start_afflvl,
end_afflvl);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA.
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
psci_do_pwrdown_cache_maintenance(max_phys_off_afflvl);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
max_phys_off_afflvl);
exit:
/*
* Release the locks corresponding to each affinity level in the
* reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
psci_release_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
if (!skip_wfi)
......@@ -352,17 +221,15 @@ exit:
* The following functions finish an earlier affinity suspend request. They
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl)
{
unsigned int plat_state, state;
int32_t suspend_level;
uint64_t counter_freq;
assert(cpu_node->level == MPIDR_AFFLVL0);
assert(node[afflvl]->level == afflvl);
/* Ensure we have been woken up from a suspended state */
state = psci_get_state(cpu_node);
assert(state == PSCI_STATE_SUSPEND);
assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_SUSPEND);
/*
* Plat. management: Perform the platform specific actions
......@@ -371,11 +238,7 @@ static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
* wrong then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of this cpu */
plat_state = get_phys_state(state);
psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level,
plat_state);
psci_plat_pm_ops->affinst_suspend_finish(afflvl);
/*
* Arch. management: Enable the data cache, manage stack memory and
......@@ -413,57 +276,3 @@ static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
dcsw_op_louis(DCCSW);
}
static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of this cpu */
plat_state = psci_get_phys_state(cluster_node);
psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level,
plat_state);
}
static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
{
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of the system */
plat_state = psci_get_phys_state(system_node);
psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
plat_state);
}
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
psci_afflvl0_suspend_finish,
psci_afflvl1_suspend_finish,
psci_afflvl2_suspend_finish,
};
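The suspend hook keeps its sec_entrypoint argument but, like the others, now fires only for the highest affinity level. A hypothetical port might implement it along these lines; the mailbox and power-controller helpers are assumptions, not code from this patch.

/* Illustrative helpers, assumed to exist in the port. */
void fictional_program_mailbox(unsigned long mpidr, unsigned long entrypoint);
void fictional_pwrc_cpu_powerdown(unsigned long mpidr);
void fictional_pwrc_cluster_powerdown(unsigned long mpidr);

/*
 * Sketch of a migrated affinst_suspend hook: sec_entrypoint is the EL3
 * warm-boot address to resume at, afflvl the highest level being
 * suspended.
 */
static void fictional_affinst_suspend(unsigned long sec_entrypoint,
				      unsigned int afflvl)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Record the warm-boot entrypoint, e.g. in a platform mailbox. */
	fictional_program_mailbox(mpidr, sec_entrypoint);

	if (afflvl >= MPIDR_AFFLVL1)
		fictional_pwrc_cluster_powerdown(mpidr);
	else
		fictional_pwrc_cpu_powerdown(mpidr);
}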
......@@ -123,43 +123,6 @@ unsigned int psci_is_last_on_cpu(void)
return 1;
}
/*******************************************************************************
* This function saves the highest affinity level which is in OFF state. The
* affinity instance with which the level is associated is determined by the
* caller.
******************************************************************************/
void psci_set_max_phys_off_afflvl(uint32_t afflvl)
{
set_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl, afflvl);
/*
* Ensure that the saved value is flushed to main memory and any
* speculatively pre-fetched stale copies are invalidated from the
* caches of other cpus in the same coherency domain. This ensures that
* the value can be safely read irrespective of the state of the data
* cache.
*/
flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
}
/*******************************************************************************
* This function reads the saved highest affinity level which is in OFF
* state. The affinity instance with which the level is associated is determined
* by the caller.
******************************************************************************/
uint32_t psci_get_max_phys_off_afflvl(void)
{
/*
* Ensure that the last update of this value in this cpu's cache is
* flushed to main memory and any speculatively pre-fetched stale copies
* are invalidated from the caches of other cpus in the same coherency
* domain. This ensures that the value is always read from the main
* memory when it was written before the data cache was enabled.
*/
flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
return get_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
}
/*******************************************************************************
* Routine to return the maximum affinity level to traverse to after a cpu has
* been physically powered up. It is expected to be called immediately after
......@@ -458,53 +421,20 @@ unsigned short psci_get_phys_state(aff_map_node_t *node)
return get_phys_state(state);
}
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the physical power on handler for the corresponding
* affinity levels
******************************************************************************/
static void psci_call_power_on_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl,
afflvl_power_on_finisher_t *pon_handlers)
{
int level;
aff_map_node_t *node;
for (level = end_afflvl; level >= start_afflvl; level--) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
/*
* If we run into any trouble while powering up an
* affinity instance, then there is no recovery path
* so simply return an error and let the caller take
* care of the situation.
*/
pon_handlers[level](node);
}
}
/*******************************************************************************
* Generic handler which is called when a cpu is physically powered on. It
* traverses through all the affinity levels performing generic, architectural,
* platform setup and state management e.g. for a cluster that's been powered
* on, it will call the platform specific code which will enable coherency at
* the interconnect level. For a cpu it could mean turning on the MMU etc.
*
* The state of all the relevant affinity levels is changed after calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is exiting from.
* traverses the node information and finds the highest affinity level powered
* off and performs generic, architectural, platform setup and state management
* to power on that affinity level and affinity levels below it.
* e.g. For a cpu that's been powered on, it will call the platform specific
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to the gic cpu interface.
*
* The affinity level specific handlers are called in descending order i.e. from
* the highest to the lowest affinity level implemented by the platform because
* to turn on affinity level X it is necessary to turn on affinity level X + 1
* first.
* The state of all the relevant affinity levels is changed prior to calling
* the platform specific code.
******************************************************************************/
void psci_afflvl_power_on_finish(int start_afflvl,
int end_afflvl,
afflvl_power_on_finisher_t *pon_handlers)
void psci_afflvl_power_on_finish(int end_afflvl,
afflvl_power_on_finisher_t pon_handler)
{
mpidr_aff_map_nodes_t mpidr_nodes;
int rc;
......@@ -518,7 +448,7 @@ void psci_afflvl_power_on_finish(int start_afflvl,
* levels are incorrect. Either case is an irrecoverable error.
*/
rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl,
MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
if (rc != PSCI_E_SUCCESS)
......@@ -529,49 +459,33 @@ void psci_afflvl_power_on_finish(int start_afflvl,
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
/*
* Stash the highest affinity level that will come out of the OFF or
* SUSPEND states.
*/
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
/* Perform generic, architecture and platform specific handling */
psci_call_power_on_handlers(mpidr_nodes,
start_afflvl,
end_afflvl,
pon_handlers);
pon_handler(mpidr_nodes, max_phys_off_afflvl);
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes,
PSCI_STATE_ON);
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
/*
* This loop releases the lock corresponding to each affinity level
* in the reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
psci_release_afflvl_locks(MPIDR_AFFLVL0,
end_afflvl,
mpidr_nodes);
}
......
......@@ -46,11 +46,11 @@
* -----------------------------------------------------
*/
func psci_aff_on_finish_entry
adr x23, psci_afflvl_on_finishers
adr x23, psci_afflvl_on_finisher
b psci_aff_common_finish_entry
psci_aff_suspend_finish_entry:
adr x23, psci_afflvl_suspend_finishers
adr x23, psci_afflvl_suspend_finisher
psci_aff_common_finish_entry:
/*
......@@ -98,15 +98,8 @@ psci_aff_common_finish_entry:
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu
/* ---------------------------------------------
* Call the finishers starting from affinity
* level 0.
* ---------------------------------------------
*/
bl get_power_on_target_afflvl
mov x2, x23
mov x1, x0
mov x0, #MPIDR_AFFLVL0
mov x1, x23
bl psci_afflvl_power_on_finish
b el3_exit
......
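In C terms, the warm-boot stubs above now amount to the call below: x0 carries the target affinity level returned by get_power_on_target_afflvl() and x1 the finisher selected at the entry point. This is only a sketch of the control flow; the real code stays in assembly.

/*
 * C-level sketch of the new warm-boot hand-off. For a resume from
 * suspend, psci_afflvl_suspend_finisher is passed instead.
 */
static void warm_boot_finish_sketch(void)
{
	int end_afflvl = get_power_on_target_afflvl();

	psci_afflvl_power_on_finish(end_afflvl, psci_afflvl_on_finisher);
}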
......@@ -56,15 +56,6 @@ func psci_do_pwrdown_cache_maintenance
stp x29, x30, [sp,#-16]!
stp x19, x20, [sp,#-16]!
mov x19, x0
bl psci_get_max_phys_off_afflvl
#if ASM_ASSERTION
cmp x0, #PSCI_INVALID_DATA
ASM_ASSERT(ne)
#endif
cmp x0, x19
b.ne 1f
/* ---------------------------------------------
* Determine to how many levels of cache will be
* subject to cache maintenance. Affinity level
......@@ -116,7 +107,6 @@ do_stack_maintenance:
sub x1, sp, x0
bl inv_dcache_range
1:
ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16
ret
......
......@@ -46,7 +46,7 @@ int psci_cpu_on(unsigned long target_cpu,
{
int rc;
unsigned int start_afflvl, end_afflvl;
unsigned int end_afflvl;
entry_point_info_t ep;
/* Determine if the cpu exists or not */
......@@ -73,18 +73,14 @@ int psci_cpu_on(unsigned long target_cpu,
if (rc != PSCI_E_SUCCESS)
return rc;
/*
* To turn this cpu on, specify which affinity
* levels need to be turned on
*/
start_afflvl = MPIDR_AFFLVL0;
end_afflvl = PLATFORM_MAX_AFFLVL;
rc = psci_afflvl_on(target_cpu,
&ep,
start_afflvl,
end_afflvl);
return rc;
}
......@@ -160,7 +156,6 @@ int psci_cpu_suspend(unsigned int power_state,
* enter the final wfi which will power down this CPU.
*/
psci_afflvl_suspend(&ep,
MPIDR_AFFLVL0,
target_afflvl);
/* Reset PSCI power state parameter for the core. */
......@@ -235,7 +230,7 @@ int psci_cpu_off(void)
* management is done immediately followed by cpu, cluster ...
* ..target_afflvl specific actions as this function unwinds back.
*/
rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);
rc = psci_afflvl_off(target_afflvl);
/*
* The only error cpu_off can return is E_DENIED. So check if that's
......
......@@ -96,7 +96,8 @@ typedef struct aff_limits_node {
} aff_limits_node_t;
typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]);
typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *);
typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *mpidr_nodes[],
int afflvl);
/*******************************************************************************
* Data prototypes
......@@ -121,9 +122,8 @@ void psci_set_state(aff_map_node_t *node, unsigned short state);
unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
int psci_validate_mpidr(unsigned long, int);
int get_power_on_target_afflvl(void);
void psci_afflvl_power_on_finish(int,
int,
afflvl_power_on_finisher_t *);
void psci_afflvl_power_on_finish(int end_afflvl,
afflvl_power_on_finisher_t pon_handler);
int psci_get_ns_ep_info(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id);
int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
......@@ -138,7 +138,6 @@ void psci_release_afflvl_locks(int start_afflvl,
int end_afflvl,
mpidr_aff_map_nodes_t mpidr_nodes);
void psci_print_affinity_map(void);
void psci_set_max_phys_off_afflvl(uint32_t afflvl);
uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
uint32_t end_afflvl,
aff_map_node_t *mpidr_nodes[]);
......@@ -155,18 +154,19 @@ aff_map_node_t *psci_get_aff_map_node(unsigned long, int);
/* Private exported functions from psci_affinity_on.c */
int psci_afflvl_on(unsigned long target_cpu,
entry_point_info_t *ep,
int start_afflvl,
int end_afflvl);
void psci_afflvl_on_finisher(aff_map_node_t *node[], int afflvl);
/* Private exported functions from psci_affinity_off.c */
int psci_afflvl_off(int, int);
int psci_afflvl_off(int end_afflvl);
/* Private exported functions from psci_affinity_suspend.c */
void psci_afflvl_suspend(entry_point_info_t *ep,
int start_afflvl,
int end_afflvl);
unsigned int psci_afflvl_suspend_finish(int, int);
void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl);
void psci_set_suspend_power_state(unsigned int power_state);
/* Private exported functions from psci_helpers.S */
......
......@@ -216,15 +216,6 @@ static void psci_init_aff_map_node(unsigned long mpidr,
psci_svc_cpu_data.power_state,
PSCI_INVALID_DATA);
/*
* There is no state associated with the current execution
* context so ensure that any reads of the highest affinity
* level in a powered down state return PSCI_INVALID_DATA.
*/
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.max_phys_off_afflvl,
PSCI_INVALID_DATA);
flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
cm_set_context_by_mpidr(mpidr,
......