Commit 4067dc31 authored by Soby Mathew

PSCI: Remove references to affinity based power management

As per Section 4.2.2 of the PSCI specification, the term "affinity"
is used to describe the hierarchical arrangement of cores. This
often, but not always, maps directly to the processor power domain
topology of the system. The current PSCI implementation assumes that
this is always the case, i.e. that MPIDR-based affinity levels always
map to levels in a power domain topology tree.

This patch is the first in a series of patches which remove this
assumption. It removes all occurrences of the terms "affinity
instance" and "affinity level" when they are used to describe the
power domain topology. Only the terminology is changed in this patch.
Subsequent patches will implement the functional changes needed to
remove the above-mentioned assumption.

Change-Id: Iee162f051b228828310610c5a320ff9d31009b4e
parent 6590ce22
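For reference, the renamed power-state helpers in psci.h still pack and unpack the PSCI power_state parameter exactly as before; only the identifiers move from "affinity level" to "power level". A minimal usage sketch, assuming a standby request at power level 1 (the level value and the wrapper function are illustrative and not part of this commit):

#include <assert.h>
#include <psci.h>

/* Illustrative helper: build a power_state requesting standby at level 1. */
static unsigned int make_level1_standby_state(void)
{
	unsigned int pstate;

	pstate = psci_make_powerstate(0,			/* state_id */
				      PSTATE_TYPE_STANDBY,	/* type */
				      1);			/* pwrlvl */

	/* The accessor macros recover the fields packed above. */
	assert(psci_get_pstate_pwrlvl(pstate) == 1);
	assert(psci_get_pstate_type(pstate) == PSTATE_TYPE_STANDBY);

	return pstate;
}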
/*
-* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,15 +32,15 @@
#define __PSCI_H__
#include <bakery_lock.h>
-#include <platform_def.h> /* for PLATFORM_NUM_AFFS */
+#include <platform_def.h> /* for PLAT_NUM_PWR_DOMAINS */
/*******************************************************************************
-* Number of affinity instances whose state this psci imp. can track
+* Number of power domains whose state this psci imp. can track
******************************************************************************/
-#ifdef PLATFORM_NUM_AFFS
-#define PSCI_NUM_AFFS PLATFORM_NUM_AFFS
+#ifdef PLAT_NUM_PWR_DOMAINS
+#define PSCI_NUM_PWR_DOMAINS PLAT_NUM_PWR_DOMAINS
#else
-#define PSCI_NUM_AFFS (2 * PLATFORM_CORE_COUNT)
+#define PSCI_NUM_PWR_DOMAINS (2 * PLATFORM_CORE_COUNT)
#endif
/*******************************************************************************
@@ -85,11 +85,11 @@
******************************************************************************/
#define PSTATE_ID_SHIFT 0
#define PSTATE_TYPE_SHIFT 16
-#define PSTATE_AFF_LVL_SHIFT 24
+#define PSTATE_PWR_LVL_SHIFT 24
#define PSTATE_ID_MASK 0xffff
#define PSTATE_TYPE_MASK 0x1
-#define PSTATE_AFF_LVL_MASK 0x3
+#define PSTATE_PWR_LVL_MASK 0x3
#define PSTATE_VALID_MASK 0xFCFE0000
#define PSTATE_TYPE_STANDBY 0x0
@@ -99,12 +99,12 @@
PSTATE_ID_MASK)
#define psci_get_pstate_type(pstate) (((pstate) >> PSTATE_TYPE_SHIFT) & \
PSTATE_TYPE_MASK)
-#define psci_get_pstate_afflvl(pstate) (((pstate) >> PSTATE_AFF_LVL_SHIFT) & \
-PSTATE_AFF_LVL_MASK)
-#define psci_make_powerstate(state_id, type, afflvl) \
+#define psci_get_pstate_pwrlvl(pstate) ((pstate >> PSTATE_PWR_LVL_SHIFT) & \
+PSTATE_PWR_LVL_MASK)
+#define psci_make_powerstate(state_id, type, pwrlvl) \
(((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
-(((afflvl) & PSTATE_AFF_LVL_MASK) << PSTATE_AFF_LVL_SHIFT)
+(((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
/*******************************************************************************
* PSCI CPU_FEATURES feature flag specific defines
@@ -138,15 +138,15 @@
#define PSCI_E_DISABLED -8
/*******************************************************************************
-* PSCI affinity state related constants. An affinity instance could be present
-* or absent physically to cater for asymmetric topologies. If present then it
-* could in one of the 4 further defined states.
+* PSCI power domain state related constants. A power domain instance could
+* be present or absent physically to cater for asymmetric topologies. If
+* present then it could be in one of the 4 further defined states.
******************************************************************************/
#define PSCI_STATE_SHIFT 1
#define PSCI_STATE_MASK 0xff
-#define PSCI_AFF_ABSENT 0x0
-#define PSCI_AFF_PRESENT 0x1
+#define PSCI_PWR_DOMAIN_ABSENT 0x0
+#define PSCI_PWR_DOMAIN_PRESENT 0x1
#define PSCI_STATE_ON 0x0
#define PSCI_STATE_OFF 0x1
#define PSCI_STATE_ON_PENDING 0x2
@@ -172,7 +172,7 @@
typedef struct psci_cpu_data {
uint32_t power_state;
#if !USE_COHERENT_MEM
-bakery_info_t pcpu_bakery_info[PSCI_NUM_AFFS];
+bakery_info_t pcpu_bakery_info[PSCI_NUM_PWR_DOMAINS];
#endif
} psci_cpu_data_t;
@@ -181,15 +181,15 @@ typedef struct psci_cpu_data {
* perform common low level pm functions
******************************************************************************/
typedef struct plat_pm_ops {
-void (*affinst_standby)(unsigned int power_state);
-int (*affinst_on)(unsigned long mpidr,
+void (*pwr_domain_standby)(unsigned int power_state);
+int (*pwr_domain_on)(unsigned long mpidr,
unsigned long sec_entrypoint,
-unsigned int afflvl);
-void (*affinst_off)(unsigned int afflvl);
-void (*affinst_suspend)(unsigned long sec_entrypoint,
-unsigned int afflvl);
-void (*affinst_on_finish)(unsigned int afflvl);
-void (*affinst_suspend_finish)(unsigned int afflvl);
+unsigned int pwrlvl);
+void (*pwr_domain_off)(unsigned int pwrlvl);
+void (*pwr_domain_suspend)(unsigned long sec_entrypoint,
+unsigned int pwrlvl);
+void (*pwr_domain_on_finish)(unsigned int pwrlvl);
+void (*pwr_domain_suspend_finish)(unsigned int pwrlvl);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state);
@@ -227,12 +227,12 @@ int psci_cpu_on(unsigned long,
unsigned long,
unsigned long);
void __dead2 psci_power_down_wfi(void);
-void psci_aff_on_finish_entry(void);
-void psci_aff_suspend_finish_entry(void);
+void psci_cpu_on_finish_entry(void);
+void psci_cpu_suspend_finish_entry(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *);
int psci_get_suspend_stateid_by_mpidr(unsigned long);
int psci_get_suspend_stateid(void);
-int psci_get_suspend_afflvl(void);
+int psci_get_suspend_pwrlvl(void);
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
...
/*
-* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -182,8 +182,8 @@ struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type);
* Mandatory PSCI functions (BL3-1)
******************************************************************************/
int platform_setup_pm(const struct plat_pm_ops **);
-unsigned int plat_get_aff_count(unsigned int, unsigned long);
-unsigned int plat_get_aff_state(unsigned int, unsigned long);
+unsigned int plat_get_pwr_domain_count(unsigned int, unsigned long);
+unsigned int plat_get_pwr_domain_state(unsigned int, unsigned long);
/*******************************************************************************
* Optional BL3-1 functions (may be overridden)
...
/*
-* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -47,10 +47,10 @@ const spd_pm_ops_t *psci_spd_pm;
/*******************************************************************************
* Grand array that holds the platform's topology information for state
-* management of affinity instances. Each node (aff_map_node) in the array
-* corresponds to an affinity instance e.g. cluster, cpu within an mpidr
+* management of power domain instances. Each node (pwr_map_node) in the array
+* corresponds to a power domain instance e.g. cluster, cpu within an mpidr
******************************************************************************/
-aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]
+pwr_map_node_t psci_pwr_domain_map[PSCI_NUM_PWR_DOMAINS]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
@@ -62,33 +62,34 @@ __attribute__ ((section("tzfw_coherent_mem")))
const plat_pm_ops_t *psci_plat_pm_ops;
/*******************************************************************************
-* Check that the maximum affinity level supported by the platform makes sense
+* Check that the maximum power level supported by the platform makes sense
* ****************************************************************************/
-CASSERT(PLATFORM_MAX_AFFLVL <= MPIDR_MAX_AFFLVL && \
-PLATFORM_MAX_AFFLVL >= MPIDR_AFFLVL0, \
-assert_platform_max_afflvl_check);
+CASSERT(PLAT_MAX_PWR_LVL <= MPIDR_MAX_AFFLVL && \
+PLAT_MAX_PWR_LVL >= MPIDR_AFFLVL0, \
+assert_platform_max_pwrlvl_check);
/*******************************************************************************
-* This function is passed an array of pointers to affinity level nodes in the
-* topology tree for an mpidr. It iterates through the nodes to find the highest
-* affinity level which is marked as physically powered off.
+* This function is passed an array of pointers to power domain nodes in the
+* topology tree for an mpidr. It iterates through the nodes to find the
+* highest power level where the power domain is marked as physically powered
+* off.
******************************************************************************/
-uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
-uint32_t end_afflvl,
-aff_map_node_t *mpidr_nodes[])
+uint32_t psci_find_max_phys_off_pwrlvl(uint32_t start_pwrlvl,
+uint32_t end_pwrlvl,
+pwr_map_node_t *mpidr_nodes[])
{
-uint32_t max_afflvl = PSCI_INVALID_DATA;
-for (; start_afflvl <= end_afflvl; start_afflvl++) {
-if (mpidr_nodes[start_afflvl] == NULL)
+uint32_t max_pwrlvl = PSCI_INVALID_DATA;
+for (; start_pwrlvl <= end_pwrlvl; start_pwrlvl++) {
+if (mpidr_nodes[start_pwrlvl] == NULL)
continue;
-if (psci_get_phys_state(mpidr_nodes[start_afflvl]) ==
+if (psci_get_phys_state(mpidr_nodes[start_pwrlvl]) ==
PSCI_STATE_OFF)
-max_afflvl = start_afflvl;
+max_pwrlvl = start_pwrlvl;
}
-return max_afflvl;
+return max_pwrlvl;
}
/*******************************************************************************
@@ -102,21 +103,21 @@ unsigned int psci_is_last_on_cpu(void)
unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
unsigned int i;
-for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
-i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {
-assert(psci_aff_map[i].level == MPIDR_AFFLVL0);
-if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
+for (i = psci_pwr_lvl_limits[MPIDR_AFFLVL0].min;
+i <= psci_pwr_lvl_limits[MPIDR_AFFLVL0].max; i++) {
+assert(psci_pwr_domain_map[i].level == MPIDR_AFFLVL0);
+if (!(psci_pwr_domain_map[i].state & PSCI_AFF_PRESENT))
continue;
-if (psci_aff_map[i].mpidr == mpidr) {
-assert(psci_get_state(&psci_aff_map[i])
+if (psci_pwr_domain_map[i].mpidr == mpidr) {
+assert(psci_get_state(&psci_pwr_domain_map[i])
== PSCI_STATE_ON);
continue;
}
-if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
+if (psci_get_state(&psci_pwr_domain_map[i]) != PSCI_STATE_OFF)
return 0;
}
@@ -124,20 +125,20 @@ unsigned int psci_is_last_on_cpu(void)
}
/*******************************************************************************
-* Routine to return the maximum affinity level to traverse to after a cpu has
+* Routine to return the maximum power level to traverse to after a cpu has
* been physically powered up. It is expected to be called immediately after
* reset from assembler code.
******************************************************************************/
-int get_power_on_target_afflvl(void)
+int get_power_on_target_pwrlvl(void)
{
-int afflvl;
+int pwrlvl;
#if DEBUG
unsigned int state;
-aff_map_node_t *node;
+pwr_map_node_t *node;
/* Retrieve our node from the topology tree */
-node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
+node = psci_get_pwr_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
MPIDR_AFFLVL0);
assert(node);
@@ -150,73 +151,74 @@ int get_power_on_target_afflvl(void)
#endif
/*
-* Assume that this cpu was suspended and retrieve its target affinity
+* Assume that this cpu was suspended and retrieve its target power
* level. If it is invalid then it could only have been turned off
-* earlier. PLATFORM_MAX_AFFLVL will be the highest affinity level a
+* earlier. PLAT_MAX_PWR_LVL will be the highest power level a
* cpu can be turned off to.
*/
-afflvl = psci_get_suspend_afflvl();
-if (afflvl == PSCI_INVALID_DATA)
-afflvl = PLATFORM_MAX_AFFLVL;
-return afflvl;
+pwrlvl = psci_get_suspend_pwrlvl();
+if (pwrlvl == PSCI_INVALID_DATA)
+pwrlvl = PLAT_MAX_PWR_LVL;
+return pwrlvl;
}
/*******************************************************************************
-* Simple routine to set the id of an affinity instance at a given level in the
-* mpidr.
+* Simple routine to set the id of a power domain instance at a given level
+* in the mpidr. The assumption is that the affinity level and the power
+* level are the same.
******************************************************************************/
-unsigned long mpidr_set_aff_inst(unsigned long mpidr,
-unsigned char aff_inst,
-int aff_lvl)
+unsigned long mpidr_set_pwr_domain_inst(unsigned long mpidr,
+unsigned char pwr_inst,
+int pwr_lvl)
{
unsigned long aff_shift;
-assert(aff_lvl <= MPIDR_AFFLVL3);
+assert(pwr_lvl <= MPIDR_AFFLVL3);
/*
* Decide the number of bits to shift by depending upon
-* the affinity level
+* the power level
*/
-aff_shift = get_afflvl_shift(aff_lvl);
+aff_shift = get_afflvl_shift(pwr_lvl);
/* Clear the existing affinity instance & set the new one*/
mpidr &= ~(((unsigned long)MPIDR_AFFLVL_MASK) << aff_shift);
-mpidr |= ((unsigned long)aff_inst) << aff_shift;
+mpidr |= ((unsigned long)pwr_inst) << aff_shift;
return mpidr;
}
/*******************************************************************************
-* This function sanity checks a range of affinity levels.
+* This function sanity checks a range of power levels.
******************************************************************************/
-int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
+int psci_check_pwrlvl_range(int start_pwrlvl, int end_pwrlvl)
{
/* Sanity check the parameters passed */
-if (end_afflvl > PLATFORM_MAX_AFFLVL)
+if (end_pwrlvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
-if (start_afflvl < MPIDR_AFFLVL0)
+if (start_pwrlvl < MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
-if (end_afflvl < start_afflvl)
+if (end_pwrlvl < start_pwrlvl)
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
/*******************************************************************************
-* This function is passed an array of pointers to affinity level nodes in the
+* This function is passed an array of pointers to power domain nodes in the
* topology tree for an mpidr and the state which each node should transition
-* to. It updates the state of each node between the specified affinity levels.
+* to. It updates the state of each node between the specified power levels.
******************************************************************************/
-void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
-uint32_t end_afflvl,
-aff_map_node_t *mpidr_nodes[],
+void psci_do_state_coordination(uint32_t start_pwrlvl,
+uint32_t end_pwrlvl,
+pwr_map_node_t *mpidr_nodes[],
uint32_t state)
{
uint32_t level;
-for (level = start_afflvl; level <= end_afflvl; level++) {
+for (level = start_pwrlvl; level <= end_pwrlvl; level++) {
if (mpidr_nodes[level] == NULL)
continue;
psci_set_state(mpidr_nodes[level], state);
@@ -224,17 +226,17 @@ void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
}
/*******************************************************************************
-* This function is passed an array of pointers to affinity level nodes in the
-* topology tree for an mpidr. It picks up locks for each affinity level bottom
+* This function is passed an array of pointers to power domain nodes in the
+* topology tree for an mpidr. It picks up locks for each power level bottom
* up in the range specified.
******************************************************************************/
-void psci_acquire_afflvl_locks(int start_afflvl,
-int end_afflvl,
-aff_map_node_t *mpidr_nodes[])
+void psci_acquire_pwr_domain_locks(int start_pwrlvl,
+int end_pwrlvl,
+pwr_map_node_t *mpidr_nodes[])
{
int level;
-for (level = start_afflvl; level <= end_afflvl; level++) {
+for (level = start_pwrlvl; level <= end_pwrlvl; level++) {
if (mpidr_nodes[level] == NULL)
continue;
@@ -243,17 +245,17 @@ void psci_acquire_afflvl_locks(int start_afflvl,
}
/*******************************************************************************
-* This function is passed an array of pointers to affinity level nodes in the
-* topology tree for an mpidr. It releases the lock for each affinity level top
+* This function is passed an array of pointers to power domain nodes in the
+* topology tree for an mpidr. It releases the lock for each power level top
* down in the range specified.
******************************************************************************/
-void psci_release_afflvl_locks(int start_afflvl,
-int end_afflvl,
-aff_map_node_t *mpidr_nodes[])
+void psci_release_pwr_domain_locks(int start_pwrlvl,
+int end_pwrlvl,
+pwr_map_node_t *mpidr_nodes[])
{
int level;
-for (level = end_afflvl; level >= start_afflvl; level--) {
+for (level = end_pwrlvl; level >= start_pwrlvl; level--) {
if (mpidr_nodes[level] == NULL)
continue;
@@ -262,15 +264,15 @@ void psci_release_afflvl_locks(int start_afflvl,
}
/*******************************************************************************
-* Simple routine to determine whether an affinity instance at a given level
-* in an mpidr exists or not.
+* Simple routine to determine whether an power domain instance at a given
+* level in an mpidr exists or not.
******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr, int level)
{
-aff_map_node_t *node;
-node = psci_get_aff_map_node(mpidr, level);
-if (node && (node->state & PSCI_AFF_PRESENT))
+pwr_map_node_t *node;
+node = psci_get_pwr_map_node(mpidr, level);
+if (node && (node->state & PSCI_PWR_DOMAIN_PRESENT))
return PSCI_E_SUCCESS;
else
return PSCI_E_INVALID_PARAMS;
@@ -334,10 +336,10 @@ int psci_get_ns_ep_info(entry_point_info_t *ep,
}
/*******************************************************************************
-* This function takes a pointer to an affinity node in the topology tree and
-* returns its state. State of a non-leaf node needs to be calculated.
+* This function takes a pointer to a power domain node in the topology tree
+* and returns its state. State of a non-leaf node needs to be calculated.
******************************************************************************/
-unsigned short psci_get_state(aff_map_node_t *node)
+unsigned short psci_get_state(pwr_map_node_t *node)
{
#if !USE_COHERENT_MEM
flush_dcache_range((uint64_t) node, sizeof(*node));
@@ -350,11 +352,11 @@ unsigned short psci_get_state(aff_map_node_t *node)
return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
/*
-* For an affinity level higher than a cpu, the state has to be
+* For a power level higher than a cpu, the state has to be
* calculated. It depends upon the value of the reference count
-* which is managed by each node at the next lower affinity level
+* which is managed by each node at the next lower power level
* e.g. for a cluster, each cpu increments/decrements the reference
-* count. If the reference count is 0 then the affinity level is
+* count. If the reference count is 0 then the power level is
* OFF else ON.
*/
if (node->ref_count)
@@ -364,16 +366,16 @@ unsigned short psci_get_state(aff_map_node_t *node)
}
/*******************************************************************************
-* This function takes a pointer to an affinity node in the topology tree and
-* a target state. State of a non-leaf node needs to be converted to a reference
-* count. State of a leaf node can be set directly.
+* This function takes a pointer to a power domain node in the topology
+* tree and a target state. State of a non-leaf node needs to be converted
+* to a reference count. State of a leaf node can be set directly.
******************************************************************************/
-void psci_set_state(aff_map_node_t *node, unsigned short state)
+void psci_set_state(pwr_map_node_t *node, unsigned short state)
{
assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
/*
-* For an affinity level higher than a cpu, the state is used
+* For a power level higher than a cpu, the state is used
* to decide whether the reference count is incremented or
* decremented. Entry into the ON_PENDING state does not have
* effect.
@@ -389,7 +391,7 @@ void psci_set_state(aff_map_node_t *node, unsigned short state)
break;
case PSCI_STATE_ON_PENDING:
/*
-* An affinity level higher than a cpu will not undergo
+* A power level higher than a cpu will not undergo
* a state change when it is about to be turned on
*/
return;
@@ -407,13 +409,13 @@ void psci_set_state(aff_map_node_t *node, unsigned short state)
}
/*******************************************************************************
-* An affinity level could be on, on_pending, suspended or off. These are the
+* A power domain could be on, on_pending, suspended or off. These are the
* logical states it can be in. Physically either it is off or on. When it is in
* the state on_pending then it is about to be turned on. It is not possible to
-* tell whether that's actually happenned or not. So we err on the side of
-* caution & treat the affinity level as being turned off.
+* tell whether that's actually happened or not. So we err on the side of
+* caution & treat the power domain as being turned off.
******************************************************************************/
-unsigned short psci_get_phys_state(aff_map_node_t *node)
+unsigned short psci_get_phys_state(pwr_map_node_t *node)
{
unsigned int state;
@@ -423,70 +425,67 @@ unsigned short psci_get_phys_state(aff_map_node_t *node)
/*******************************************************************************
* Generic handler which is called when a cpu is physically powered on. It
-* traverses the node information and finds the highest affinity level powered
+* traverses the node information and finds the highest power level powered
* off and performs generic, architectural, platform setup and state management
-* to power on that affinity level and affinity levels below it.
+* to power on that power level and power levels below it.
* e.g. For a cpu that's been powered on, it will call the platform specific
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to gic cpu interface.
-*
-* The state of all the relevant affinity levels is changed prior to calling
-* the platform specific code.
******************************************************************************/
-void psci_afflvl_power_on_finish(int end_afflvl,
-afflvl_power_on_finisher_t pon_handler)
+void psci_power_up_finish(int end_pwrlvl,
+pwrlvl_power_on_finisher_t pon_handler)
{
-mpidr_aff_map_nodes_t mpidr_nodes;
+mpidr_pwr_map_nodes_t mpidr_nodes;
int rc;
-unsigned int max_phys_off_afflvl;
+unsigned int max_phys_off_pwrlvl;
/*
* Collect the pointers to the nodes in the topology tree for
-* each affinity instance in the mpidr. If this function does
-* not return successfully then either the mpidr or the affinity
+* each power domain instances in the mpidr. If this function does
+* not return successfully then either the mpidr or the power
* levels are incorrect. Either case is an irrecoverable error.
*/
-rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
+rc = psci_get_pwr_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
MPIDR_AFFLVL0,
-end_afflvl,
+end_pwrlvl,
mpidr_nodes);
if (rc != PSCI_E_SUCCESS)
panic();
/*
-* This function acquires the lock corresponding to each affinity
+* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
-psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
-end_afflvl,
+psci_acquire_pwr_domain_locks(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes);
-max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0,
-end_afflvl,
+max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes);
-assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
+assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);
/* Perform generic, architecture and platform specific handling */
-pon_handler(mpidr_nodes, max_phys_off_afflvl);
+pon_handler(mpidr_nodes, max_phys_off_pwrlvl);
/*
-* This function updates the state of each affinity instance
-* corresponding to the mpidr in the range of affinity levels
+* This function updates the state of each power instance
+* corresponding to the mpidr in the range of power levels
* specified.
*/
-psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
-end_afflvl,
+psci_do_state_coordination(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes,
PSCI_STATE_ON);
/*
-* This loop releases the lock corresponding to each affinity level
+* This loop releases the lock corresponding to each power level
* in the reverse order to which they were acquired.
*/
-psci_release_afflvl_locks(MPIDR_AFFLVL0,
-end_afflvl,
+psci_release_pwr_domain_locks(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes);
}
@@ -532,13 +531,13 @@ int psci_spd_migrate_info(uint64_t *mpidr)
/*******************************************************************************
-* This function prints the state of all affinity instances present in the
+* This function prints the state of all power domains present in the
* system
******************************************************************************/
-void psci_print_affinity_map(void)
+void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
-aff_map_node_t *node;
+pwr_map_node_t *node;
unsigned int idx;
/* This array maps to the PSCI_STATE_X definitions in psci.h */
static const char *psci_state_str[] = {
@@ -548,13 +547,13 @@ void psci_print_affinity_map(void)
"SUSPEND"
};
-INFO("PSCI Affinity Map:\n");
-for (idx = 0; idx < PSCI_NUM_AFFS ; idx++) {
-node = &psci_aff_map[idx];
-if (!(node->state & PSCI_AFF_PRESENT)) {
+INFO("PSCI Power Domain Map:\n");
+for (idx = 0; idx < PSCI_NUM_PWR_DOMAINS; idx++) {
+node = &psci_pwr_domain_map[idx];
+if (!(node->state & PSCI_PWR_DOMAIN_PRESENT)) {
continue;
}
-INFO(" AffInst: Level %u, MPID 0x%lx, State %s\n",
+INFO(" pwrInst: Level %u, MPID 0x%lx, State %s\n",
node->level, node->mpidr,
psci_state_str[psci_get_state(node)]);
}
...
@@ -34,8 +34,8 @@
#include <psci.h>
#include <xlat_tables.h>
-.globl psci_aff_on_finish_entry
-.globl psci_aff_suspend_finish_entry
+.globl psci_cpu_on_finish_entry
+.globl psci_cpu_suspend_finish_entry
.globl psci_power_down_wfi
/* -----------------------------------------------------
@@ -45,14 +45,14 @@
* the handlers (chosen depending upon original state).
* -----------------------------------------------------
*/
-func psci_aff_on_finish_entry
-adr x23, psci_afflvl_on_finisher
-b psci_aff_common_finish_entry
-psci_aff_suspend_finish_entry:
-adr x23, psci_afflvl_suspend_finisher
-psci_aff_common_finish_entry:
+func psci_cpu_on_finish_entry
+adr x23, psci_cpu_on_finish
+b psci_power_up_entry
+psci_cpu_suspend_finish_entry:
+adr x23, psci_cpu_suspend_finish
+psci_power_up_entry:
/*
* On the warm boot path, most of the EL3 initialisations performed by
* 'el3_entrypoint_common' must be skipped:
@@ -98,12 +98,12 @@ psci_aff_common_finish_entry:
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu
-bl get_power_on_target_afflvl
+bl get_power_on_target_pwrlvl
mov x1, x23
-bl psci_afflvl_power_on_finish
+bl psci_power_up_finish
b el3_exit
-endfunc psci_aff_on_finish_entry
+endfunc psci_cpu_on_finish_entry
/* --------------------------------------------
* This function is called to indicate to the
...
/*
-* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,14 +38,13 @@
.globl psci_do_pwrup_cache_maintenance
/* -----------------------------------------------------------------------
-* void psci_do_pwrdown_cache_maintenance(uint32_t affinity level);
+* void psci_do_pwrdown_cache_maintenance(uint32_t power level);
*
-* This function performs cache maintenance if the specified affinity
-* level is the equal to the level of the highest affinity instance which
-* will be/is physically powered off. The levels of cache affected are
-* determined by the affinity level which is passed as the argument i.e.
-* level 0 results in a flush of the L1 cache. Both the L1 and L2 caches
-* are flushed for a higher affinity level.
+* This function performs cache maintenance for the specified power
+* level. The levels of cache affected are determined by the power
+* level which is passed as the argument i.e. level 0 results
+* in a flush of the L1 cache. Both the L1 and L2 caches are flushed
+* for a higher power level.
*
* Additionally, this function also ensures that stack memory is correctly
* flushed out to avoid coherency issues due to a change in its memory
@@ -58,11 +57,11 @@ func psci_do_pwrdown_cache_maintenance
/* ---------------------------------------------
* Determine to how many levels of cache will be
-* subject to cache maintenance. Affinity level
+* subject to cache maintenance. Power level
* 0 implies that only the cpu is being powered
* down. Only the L1 data cache needs to be
* flushed to the PoU in this case. For a higher
-* affinity level we are assuming that a flush
+* power level we are assuming that a flush
* of L1 data and L2 unified cache is enough.
* This information should be provided by the
* platform.
...
/*
-* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -46,7 +46,7 @@ int psci_cpu_on(unsigned long target_cpu,
{
int rc;
-unsigned int end_afflvl;
+unsigned int end_pwrlvl;
entry_point_info_t ep;
/* Determine if the cpu exists of not */
@@ -74,13 +74,13 @@ int psci_cpu_on(unsigned long target_cpu,
return rc;
/*
-* To turn this cpu on, specify which affinity
+* To turn this cpu on, specify which power
* levels need to be turned on
*/
-end_afflvl = PLATFORM_MAX_AFFLVL;
-rc = psci_afflvl_on(target_cpu,
+end_pwrlvl = PLAT_MAX_PWR_LVL;
+rc = psci_cpu_on_start(target_cpu,
&ep,
-end_afflvl);
+end_pwrlvl);
return rc;
}
@@ -94,7 +94,7 @@ int psci_cpu_suspend(unsigned int power_state,
unsigned long context_id)
{
int rc;
-unsigned int target_afflvl, pstate_type;
+unsigned int target_pwrlvl, pstate_type;
entry_point_info_t ep;
/* Check SBZ bits in power state are zero */
@@ -102,8 +102,8 @@ int psci_cpu_suspend(unsigned int power_state,
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
-target_afflvl = psci_get_pstate_afflvl(power_state);
-if (target_afflvl > PLATFORM_MAX_AFFLVL)
+target_pwrlvl = psci_get_pstate_pwrlvl(power_state);
+if (target_pwrlvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Validate the power_state using platform pm_ops */
@@ -132,10 +132,10 @@ int psci_cpu_suspend(unsigned int power_state,
* a standby state.
*/
if (pstate_type == PSTATE_TYPE_STANDBY) {
-if (!psci_plat_pm_ops->affinst_standby)
+if (!psci_plat_pm_ops->pwr_domain_standby)
return PSCI_E_INVALID_PARAMS;
-psci_plat_pm_ops->affinst_standby(power_state);
+psci_plat_pm_ops->pwr_domain_standby(power_state);
return PSCI_E_SUCCESS;
}
@@ -155,8 +155,8 @@ int psci_cpu_suspend(unsigned int power_state,
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this CPU.
*/
-psci_afflvl_suspend(&ep,
-target_afflvl);
+psci_cpu_suspend_start(&ep,
+target_pwrlvl);
/* Reset PSCI power state parameter for the core. */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
@@ -210,9 +210,7 @@ int psci_system_suspend(unsigned long entrypoint,
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this cpu.
*/
-psci_afflvl_suspend(&ep,
-MPIDR_AFFLVL0,
-PLATFORM_MAX_AFFLVL);
+psci_cpu_suspend_start(&ep, PLAT_MAX_PWR_LVL);
/* Reset PSCI power state parameter for the core. */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
@@ -222,15 +220,14 @@ int psci_system_suspend(unsigned long entrypoint,
int psci_cpu_off(void)
{
int rc;
-int target_afflvl = PLATFORM_MAX_AFFLVL;
+int target_pwrlvl = PLAT_MAX_PWR_LVL;
/*
-* Traverse from the highest to the lowest affinity level. When the
-* lowest affinity level is hit, all the locks are acquired. State
-* management is done immediately followed by cpu, cluster ...
-* ..target_afflvl specific actions as this function unwinds back.
+* Do what is needed to power off this CPU and possible higher power
+* levels if it able to do so. Upon success, enter the final wfi
+* which will power down this CPU.
*/
-rc = psci_afflvl_off(target_afflvl);
+rc = psci_do_cpu_off(target_pwrlvl);
/*
* The only error cpu_off can return is E_DENIED. So check if that's
@@ -245,28 +242,28 @@ int psci_affinity_info(unsigned long target_affinity,
unsigned int lowest_affinity_level)
{
int rc = PSCI_E_INVALID_PARAMS;
-unsigned int aff_state;
-aff_map_node_t *node;
-if (lowest_affinity_level > PLATFORM_MAX_AFFLVL)
+unsigned int pwr_domain_state;
+pwr_map_node_t *node;
+if (lowest_affinity_level > PLAT_MAX_PWR_LVL)
return rc;
-node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
-if (node && (node->state & PSCI_AFF_PRESENT)) {
+node = psci_get_pwr_map_node(target_affinity, lowest_affinity_level);
+if (node && (node->state & PSCI_PWR_DOMAIN_PRESENT)) {
/*
-* TODO: For affinity levels higher than 0 i.e. cpu, the
+* TODO: For power levels higher than 0 i.e. cpu, the
* state will always be either ON or OFF. Need to investigate
* how critical is it to support ON_PENDING here.
*/
-aff_state = psci_get_state(node);
+pwr_domain_state = psci_get_state(node);
/* A suspended cpu is available & on for the OS */
-if (aff_state == PSCI_STATE_SUSPEND) {
-aff_state = PSCI_STATE_ON;
+if (pwr_domain_state == PSCI_STATE_SUSPEND) {
+pwr_domain_state = PSCI_STATE_ON;
}
-rc = aff_state;
+rc = pwr_domain_state;
}
return rc;
...
/*
-* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,48 +37,49 @@
/******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
-* It's assumed that along with turning the cpu off, higher affinity levels
-* will be turned off as far as possible. It finds the highest level to be
-* powered off by traversing the node information and then performs generic,
-* architectural, platform setup and state management required to turn OFF
-* that affinity level and affinity levels below it. e.g. For a cpu that's to
-* be powered OFF, it could mean programming the power controller whereas for
-* a cluster that's to be powered off, it will call the platform specific code
-* which will disable coherency at the interconnect level if the cpu is the
-* last in the cluster and also the program the power controller.
+* It's assumed that along with turning the cpu power domain off, power
+* domains at higher levels will be turned off as far as possible. It finds
+* the highest level where a domain has to be powered off by traversing the
+* node information and then performs generic, architectural, platform setup
+* and state management required to turn OFF that power domain and domains
+* below it. e.g. For a cpu that's to be powered OFF, it could mean programming
+* the power controller whereas for a cluster that's to be powered off, it will
+* call the platform specific code which will disable coherency at the
+* interconnect level if the cpu is the last in the cluster and also the
+* program the power controller.
******************************************************************************/
-int psci_afflvl_off(int end_afflvl)
+int psci_do_cpu_off(int end_pwrlvl)
{
int rc;
-mpidr_aff_map_nodes_t mpidr_nodes;
-unsigned int max_phys_off_afflvl;
+mpidr_pwr_map_nodes_t mpidr_nodes;
+unsigned int max_phys_off_pwrlvl;
/*
* This function must only be called on platforms where the
* CPU_OFF platform hooks have been implemented.
*/
-assert(psci_plat_pm_ops->affinst_off);
+assert(psci_plat_pm_ops->pwr_domain_off);
/*
* Collect the pointers to the nodes in the topology tree for
-* each affinity instance in the mpidr. If this function does
-* not return successfully then either the mpidr or the affinity
+* each power domain instance in the mpidr. If this function does
+* not return successfully then either the mpidr or the power
* levels are incorrect. Either way, this an internal TF error
* therefore assert.
*/
-rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
+rc = psci_get_pwr_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
MPIDR_AFFLVL0,
-end_afflvl,
+end_pwrlvl,
mpidr_nodes);
assert(rc == PSCI_E_SUCCESS);
/*
-* This function acquires the lock corresponding to each affinity
+* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
-psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
-end_afflvl,
+psci_acquire_pwr_domain_locks(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes);
@@ -94,39 +95,39 @@ int psci_afflvl_off(int end_afflvl)
}
/*
-* This function updates the state of each affinity instance
-* corresponding to the mpidr in the range of affinity levels
+* This function updates the state of each power domain instance
+* corresponding to the mpidr in the range of power levels
* specified.
*/
-psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
-end_afflvl,
+psci_do_state_coordination(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes,
PSCI_STATE_OFF);
-max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0,
-end_afflvl,
+max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes);
-assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
+assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
-psci_do_pwrdown_cache_maintenance(max_phys_off_afflvl);
+psci_do_pwrdown_cache_maintenance(max_phys_off_pwrlvl);
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
-psci_plat_pm_ops->affinst_off(max_phys_off_afflvl);
+psci_plat_pm_ops->pwr_domain_off(max_phys_off_pwrlvl);
exit:
/*
-* Release the locks corresponding to each affinity level in the
+* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
-psci_release_afflvl_locks(MPIDR_AFFLVL0,
-end_afflvl,
+psci_release_pwr_domain_locks(MPIDR_AFFLVL0,
+end_pwrlvl,
mpidr_nodes);
/*
...
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -63,43 +63,43 @@ static int cpu_on_validate_state(unsigned int psci_state) ...@@ -63,43 +63,43 @@ static int cpu_on_validate_state(unsigned int psci_state)
* enough information is stashed for it to resume execution in the non-secure * enough information is stashed for it to resume execution in the non-secure
* security state. * security state.
* *
* The state of all the relevant affinity levels is changed after calling the * The state of all the relevant power domains are changed after calling the
* platform handler as it can return error. * platform handler as it can return error.
******************************************************************************/ ******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu, int psci_cpu_on_start(unsigned long target_cpu,
entry_point_info_t *ep, entry_point_info_t *ep,
int end_afflvl) int end_pwrlvl)
{ {
int rc; int rc;
mpidr_aff_map_nodes_t target_cpu_nodes; mpidr_pwr_map_nodes_t target_cpu_nodes;
unsigned long psci_entrypoint; unsigned long psci_entrypoint;
/* /*
* This function must only be called on platforms where the * This function must only be called on platforms where the
* CPU_ON platform hooks have been implemented. * CPU_ON platform hooks have been implemented.
*/ */
assert(psci_plat_pm_ops->affinst_on && assert(psci_plat_pm_ops->pwr_domain_on &&
psci_plat_pm_ops->affinst_on_finish); psci_plat_pm_ops->pwr_domain_on_finish);
/* /*
* Collect the pointers to the nodes in the topology tree for * Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does * each power domain instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity * not return successfully then either the mpidr or the power
* levels are incorrect. * levels are incorrect.
*/ */
rc = psci_get_aff_map_nodes(target_cpu, rc = psci_get_pwr_map_nodes(target_cpu,
MPIDR_AFFLVL0, MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
target_cpu_nodes); target_cpu_nodes);
assert(rc == PSCI_E_SUCCESS); assert(rc == PSCI_E_SUCCESS);
/* /*
* This function acquires the lock corresponding to each affinity * This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology * level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely. * is snapshot and state management can be done safely.
*/ */
psci_acquire_afflvl_locks(MPIDR_AFFLVL0, psci_acquire_pwr_domain_locks(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
target_cpu_nodes); target_cpu_nodes);
/* /*
...@@ -124,8 +124,8 @@ int psci_afflvl_on(unsigned long target_cpu, ...@@ -124,8 +124,8 @@ int psci_afflvl_on(unsigned long target_cpu,
* corresponding to the mpidr in the range of affinity levels * corresponding to the mpidr in the range of affinity levels
* specified. * specified.
*/ */
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0, psci_do_state_coordination(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
target_cpu_nodes, target_cpu_nodes,
PSCI_STATE_ON_PENDING); PSCI_STATE_ON_PENDING);
...@@ -133,14 +133,14 @@ int psci_afflvl_on(unsigned long target_cpu, ...@@ -133,14 +133,14 @@ int psci_afflvl_on(unsigned long target_cpu,
* Perform generic, architecture and platform specific handling. * Perform generic, architecture and platform specific handling.
*/ */
/* Set the secure world (EL3) re-entry point after BL1 */ /* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; psci_entrypoint = (unsigned long) psci_cpu_on_finish_entry;
/* /*
* Plat. management: Give the platform the current state * Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary * of the target cpu to allow it to perform the necessary
* steps to power on. * steps to power on.
*/ */
rc = psci_plat_pm_ops->affinst_on(target_cpu, rc = psci_plat_pm_ops->pwr_domain_on(target_cpu,
psci_entrypoint, psci_entrypoint,
MPIDR_AFFLVL0); MPIDR_AFFLVL0);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL); assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
...@@ -150,29 +150,29 @@ int psci_afflvl_on(unsigned long target_cpu, ...@@ -150,29 +150,29 @@ int psci_afflvl_on(unsigned long target_cpu,
cm_init_context(target_cpu, ep); cm_init_context(target_cpu, ep);
else else
/* Restore the state on error. */ /* Restore the state on error. */
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0, psci_do_state_coordination(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
target_cpu_nodes, target_cpu_nodes,
PSCI_STATE_OFF); PSCI_STATE_OFF);
exit: exit:
/* /*
* This loop releases the lock corresponding to each affinity level * This loop releases the lock corresponding to each power level
* in the reverse order to which they were acquired. * in the reverse order to which they were acquired.
*/ */
psci_release_afflvl_locks(MPIDR_AFFLVL0, psci_release_pwr_domain_locks(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
target_cpu_nodes); target_cpu_nodes);
return rc; return rc;
} }
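The lock discipline used above (acquire from MPIDR_AFFLVL0 up to end_pwrlvl, then release in the reverse order) is what lets the routine treat the per-level state as a consistent snapshot while it works. The fragment below is a minimal standalone sketch of that ordering only; it uses pthread mutexes as stand-ins for the per-node bakery locks, and the level count is invented for illustration.

#include <pthread.h>

#define TOY_NUM_LVLS 3	/* illustrative: cpu, cluster, system */

/* One stand-in lock per power level on this cpu's path to the root. */
static pthread_mutex_t toy_lvl_lock[TOY_NUM_LVLS] = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER
};

/* Acquire bottom-up: level 0 (the cpu) first, end_lvl last. */
static void toy_acquire_locks(int end_lvl)
{
	for (int lvl = 0; lvl <= end_lvl; lvl++)
		pthread_mutex_lock(&toy_lvl_lock[lvl]);
}

/* Release top-down, mirroring the acquisition order in reverse. */
static void toy_release_locks(int end_lvl)
{
	for (int lvl = end_lvl; lvl >= 0; lvl--)
		pthread_mutex_unlock(&toy_lvl_lock[lvl]);
}

Because every caller acquires in the same bottom-up order, two cpus contending for overlapping levels cannot deadlock.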
/******************************************************************************* /*******************************************************************************
* The following function finish an earlier affinity power on request. They * The following function finishes an earlier power on request. It is
* are called by the common finisher routine in psci_common.c. * called by the common finisher routine in psci_common.c.
******************************************************************************/ ******************************************************************************/
void psci_afflvl_on_finisher(aff_map_node_t *node[], int afflvl) void psci_cpu_on_finish(pwr_map_node_t *node[], int pwrlvl)
{ {
assert(node[afflvl]->level == afflvl); assert(node[pwrlvl]->level == pwrlvl);
/* Ensure we have been explicitly woken up by another cpu */ /* Ensure we have been explicitly woken up by another cpu */
assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_ON_PENDING); assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_ON_PENDING);
...@@ -183,7 +183,7 @@ void psci_afflvl_on_finisher(aff_map_node_t *node[], int afflvl) ...@@ -183,7 +183,7 @@ void psci_afflvl_on_finisher(aff_map_node_t *node[], int afflvl)
* register. The actual state of this cpu has already been * register. The actual state of this cpu has already been
* changed. * changed.
*/ */
psci_plat_pm_ops->affinst_on_finish(afflvl); psci_plat_pm_ops->pwr_domain_on_finish(pwrlvl);
/* /*
* Arch. management: Enable data cache and manage stack memory * Arch. management: Enable data cache and manage stack memory
......
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -41,14 +41,17 @@ ...@@ -41,14 +41,17 @@
* Lock API. * Lock API.
*/ */
#if USE_COHERENT_MEM #if USE_COHERENT_MEM
#define psci_lock_init(aff_map, idx) bakery_lock_init(&(aff_map)[(idx)].lock) #define psci_lock_init(pwr_map, idx) bakery_lock_init(&(pwr_map)[(idx)].lock)
#define psci_lock_get(node) bakery_lock_get(&((node)->lock)) #define psci_lock_get(node) bakery_lock_get(&((node)->lock))
#define psci_lock_release(node) bakery_lock_release(&((node)->lock)) #define psci_lock_release(node) bakery_lock_release(&((node)->lock))
#else #else
#define psci_lock_init(aff_map, idx) ((aff_map)[(idx)].aff_map_index = (idx)) #define psci_lock_init(pwr_map, idx) \
#define psci_lock_get(node) bakery_lock_get((node)->aff_map_index, \ ((pwr_map)[(idx)].pwr_domain_index = (idx))
#define psci_lock_get(node) \
bakery_lock_get((node)->pwr_domain_index,\
CPU_DATA_PSCI_LOCK_OFFSET) CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_release(node) bakery_lock_release((node)->aff_map_index,\ #define psci_lock_release(node) \
bakery_lock_release((node)->pwr_domain_index,\
CPU_DATA_PSCI_LOCK_OFFSET) CPU_DATA_PSCI_LOCK_OFFSET)
#endif #endif
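The macro pair above keeps a single psci_lock_get(node)/psci_lock_release(node) spelling at every call site while the storage behind the lock changes with the build: with coherent memory the lock lives inside the node, otherwise the node only carries an index into per-cpu bakery data. Below is a compressed, hypothetical illustration of the same shim idea, with a trivial spinlock standing in for the bakery lock; the types, names and table size are made up for the sketch.

#include <stdatomic.h>

/* A trivial test-and-set spinlock stands in for the real bakery lock. */
typedef struct { atomic_int taken; } toy_lock_t;
#define toy_lock(l)	do { } while (atomic_exchange(&(l)->taken, 1))
#define toy_unlock(l)	atomic_store(&(l)->taken, 0)

#ifdef TOY_COHERENT_MEM
/* Variant 1: the lock is embedded in the topology node itself. */
typedef struct toy_node { toy_lock_t lock; } toy_node_t;
#define toy_node_lock_init(map, idx)	((void)(map), (void)(idx))
#define toy_node_lock_get(n)		toy_lock(&(n)->lock)
#define toy_node_lock_release(n)	toy_unlock(&(n)->lock)
#else
/* Variant 2: the node stores only an index into a separate lock table,
 * mirroring the pwr_domain_index / per-cpu bakery_info arrangement. */
typedef struct toy_node { unsigned char lock_index; } toy_node_t;
static toy_lock_t toy_lock_table[16];
#define toy_node_lock_init(map, idx)	((map)[(idx)].lock_index = (unsigned char)(idx))
#define toy_node_lock_get(n)		toy_lock(&toy_lock_table[(n)->lock_index])
#define toy_node_lock_release(n)	toy_unlock(&toy_lock_table[(n)->lock_index])
#endif

Call sites compile identically in either configuration, which is what lets the PSCI code switch lock storage with USE_COHERENT_MEM without touching its lock/unlock call sites.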
...@@ -75,9 +78,9 @@ ...@@ -75,9 +78,9 @@
/******************************************************************************* /*******************************************************************************
* The following two data structures hold the topology tree which in turn tracks * The following two data structures hold the topology tree which in turn tracks
* the state of the all the affinity instances supported by the platform. * the state of all the power domain instances supported by the platform.
******************************************************************************/ ******************************************************************************/
typedef struct aff_map_node { typedef struct pwr_map_node {
unsigned long mpidr; unsigned long mpidr;
unsigned char ref_count; unsigned char ref_count;
unsigned char state; unsigned char state;
...@@ -86,25 +89,25 @@ typedef struct aff_map_node { ...@@ -86,25 +89,25 @@ typedef struct aff_map_node {
bakery_lock_t lock; bakery_lock_t lock;
#else #else
/* For indexing the bakery_info array in per CPU data */ /* For indexing the bakery_info array in per CPU data */
unsigned char aff_map_index; unsigned char pwr_domain_index;
#endif #endif
} aff_map_node_t; } pwr_map_node_t;
typedef struct aff_limits_node { typedef struct pwr_lvl_limits_node {
int min; int min;
int max; int max;
} aff_limits_node_t; } pwr_lvl_limits_node_t;
typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]); typedef pwr_map_node_t (*mpidr_pwr_map_nodes_t[MPIDR_MAX_AFFLVL + 1]);
typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *mpidr_nodes[], typedef void (*pwrlvl_power_on_finisher_t)(pwr_map_node_t *mpidr_nodes[],
int afflvl); int pwrlvl);
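The mpidr_pwr_map_nodes_t typedef is the least obvious declaration here: it names an array type holding one pwr_map_node_t pointer per power level, indexed by level, which is what the *_start and *_finish routines pass around as mpidr_nodes. A tiny hypothetical sketch of the same declarator shape and how such an array is filled (simplified node type, invented level count):

#define TOY_MAX_LVL	2		/* illustrative: levels 0..2 */

typedef struct toy_node { int level; } toy_node_t;

/* Array of (TOY_MAX_LVL + 1) pointers to toy_node_t, indexed by level. */
typedef toy_node_t (*toy_nodes_t[TOY_MAX_LVL + 1]);

static toy_node_t toy_cpu_node = { 0 };		/* level 0: the cpu */
static toy_node_t toy_cluster_node = { 1 };	/* level 1: its cluster */

static void toy_collect(toy_nodes_t nodes)
{
	nodes[0] = &toy_cpu_node;
	nodes[1] = &toy_cluster_node;
	nodes[2] = 0;			/* level 2: not present */
}

When such an array is passed to a function it decays to a pointer to its first element, which is why routines like psci_cpu_on_finish can equivalently declare the parameter as pwr_map_node_t *node[].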
/******************************************************************************* /*******************************************************************************
* Data prototypes * Data prototypes
******************************************************************************/ ******************************************************************************/
extern const plat_pm_ops_t *psci_plat_pm_ops; extern const plat_pm_ops_t *psci_plat_pm_ops;
extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]; extern pwr_map_node_t psci_pwr_domain_map[PSCI_NUM_PWR_DOMAINS];
extern aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; extern pwr_lvl_limits_node_t psci_pwr_lvl_limits[MPIDR_MAX_AFFLVL + 1];
extern uint32_t psci_caps; extern uint32_t psci_caps;
/******************************************************************************* /*******************************************************************************
...@@ -116,61 +119,61 @@ extern const spd_pm_ops_t *psci_spd_pm; ...@@ -116,61 +119,61 @@ extern const spd_pm_ops_t *psci_spd_pm;
* Function prototypes * Function prototypes
******************************************************************************/ ******************************************************************************/
/* Private exported functions from psci_common.c */ /* Private exported functions from psci_common.c */
unsigned short psci_get_state(aff_map_node_t *node); unsigned short psci_get_state(pwr_map_node_t *node);
unsigned short psci_get_phys_state(aff_map_node_t *node); unsigned short psci_get_phys_state(pwr_map_node_t *node);
void psci_set_state(aff_map_node_t *node, unsigned short state); void psci_set_state(pwr_map_node_t *node, unsigned short state);
unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int); unsigned long mpidr_set_pwr_domain_inst(unsigned long, unsigned char, int);
int psci_validate_mpidr(unsigned long, int); int psci_validate_mpidr(unsigned long, int);
int get_power_on_target_afflvl(void); int get_power_on_target_pwrlvl(void);
void psci_afflvl_power_on_finish(int end_afflvl, void psci_power_up_finish(int end_pwrlvl,
afflvl_power_on_finisher_t pon_handler); pwrlvl_power_on_finisher_t pon_handler);
int psci_get_ns_ep_info(entry_point_info_t *ep, int psci_get_ns_ep_info(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id); uint64_t entrypoint, uint64_t context_id);
int psci_check_afflvl_range(int start_afflvl, int end_afflvl); int psci_check_pwrlvl_range(int start_pwrlvl, int end_pwrlvl);
void psci_do_afflvl_state_mgmt(uint32_t start_afflvl, void psci_do_state_coordination(uint32_t start_pwrlvl,
uint32_t end_afflvl, uint32_t end_pwrlvl,
aff_map_node_t *mpidr_nodes[], pwr_map_node_t *mpidr_nodes[],
uint32_t state); uint32_t state);
void psci_acquire_afflvl_locks(int start_afflvl, void psci_acquire_pwr_domain_locks(int start_pwrlvl,
int end_afflvl, int end_pwrlvl,
aff_map_node_t *mpidr_nodes[]); pwr_map_node_t *mpidr_nodes[]);
void psci_release_afflvl_locks(int start_afflvl, void psci_release_pwr_domain_locks(int start_pwrlvl,
int end_afflvl, int end_pwrlvl,
mpidr_aff_map_nodes_t mpidr_nodes); mpidr_pwr_map_nodes_t mpidr_nodes);
void psci_print_affinity_map(void); void psci_print_power_domain_map(void);
uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl, uint32_t psci_find_max_phys_off_pwrlvl(uint32_t start_pwrlvl,
uint32_t end_afflvl, uint32_t end_pwrlvl,
aff_map_node_t *mpidr_nodes[]); pwr_map_node_t *mpidr_nodes[]);
unsigned int psci_is_last_on_cpu(void); unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(uint64_t *mpidr); int psci_spd_migrate_info(uint64_t *mpidr);
/* Private exported functions from psci_setup.c */ /* Private exported functions from psci_setup.c */
int psci_get_aff_map_nodes(unsigned long mpidr, int psci_get_pwr_map_nodes(unsigned long mpidr,
int start_afflvl, int start_pwrlvl,
int end_afflvl, int end_pwrlvl,
aff_map_node_t *mpidr_nodes[]); pwr_map_node_t *mpidr_nodes[]);
aff_map_node_t *psci_get_aff_map_node(unsigned long, int); pwr_map_node_t *psci_get_pwr_map_node(unsigned long, int);
/* Private exported functions from psci_affinity_on.c */ /* Private exported functions from psci_cpu_on.c */
int psci_afflvl_on(unsigned long target_cpu, int psci_cpu_on_start(unsigned long target_cpu,
entry_point_info_t *ep, entry_point_info_t *ep,
int end_afflvl); int end_pwrlvl);
void psci_afflvl_on_finisher(aff_map_node_t *node[], int afflvl); void psci_cpu_on_finish(pwr_map_node_t *node[], int pwrlvl);
/* Private exported functions from psci_affinity_off.c */ /* Private exported functions from psci_cpu_off.c */
int psci_afflvl_off(int end_afflvl); int psci_do_cpu_off(int end_pwrlvl);
/* Private exported functions from psci_affinity_suspend.c */ /* Private exported functions from psci_cpu_suspend.c */
void psci_afflvl_suspend(entry_point_info_t *ep, void psci_cpu_suspend_start(entry_point_info_t *ep,
int end_afflvl); int end_pwrlvl);
void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl); void psci_cpu_suspend_finish(pwr_map_node_t *node[], int pwrlvl);
void psci_set_suspend_power_state(unsigned int power_state); void psci_set_suspend_power_state(unsigned int power_state);
/* Private exported functions from psci_helpers.S */ /* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level); void psci_do_pwrdown_cache_maintenance(uint32_t pwr_level);
void psci_do_pwrup_cache_maintenance(void); void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */ /* Private exported functions from psci_system_off.c */
......
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -42,20 +42,20 @@ ...@@ -42,20 +42,20 @@
* Per cpu non-secure contexts used to program the architectural state prior * Per cpu non-secure contexts used to program the architectural state prior
* return to the normal world. * return to the normal world.
* TODO: Use the memory allocator to set aside memory for the contexts instead * TODO: Use the memory allocator to set aside memory for the contexts instead
* of relying on platform defined constants. Using PSCI_NUM_AFFS will be an * of relying on platform defined constants. Using PSCI_NUM_PWR_DOMAINS will be
* overkill. * an overkill.
******************************************************************************/ ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT]; static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
/******************************************************************************* /*******************************************************************************
* In a system, a certain number of affinity instances are present at an * In a system, a certain number of power domain instances are present at a
* affinity level. The cumulative number of instances across all levels are * power level. The cumulative number of instances across all levels is
* stored in 'psci_aff_map'. The topology tree has been flattened into this * stored in 'psci_pwr_domain_map'. The topology tree has been flattened into
* array. To retrieve nodes, information about the extents of each affinity * this array. To retrieve nodes, information about the extents of each power
* level i.e. start index and end index needs to be present. 'psci_aff_limits' * level i.e. start index and end index needs to be present.
* stores this information. * 'psci_pwr_lvl_limits' stores this information.
******************************************************************************/ ******************************************************************************/
aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; pwr_lvl_limits_node_t psci_pwr_lvl_limits[MPIDR_MAX_AFFLVL + 1];
/****************************************************************************** /******************************************************************************
* Define the psci capability variable. * Define the psci capability variable.
...@@ -64,12 +64,12 @@ uint32_t psci_caps; ...@@ -64,12 +64,12 @@ uint32_t psci_caps;
/******************************************************************************* /*******************************************************************************
* Routines for retrieving the node corresponding to an affinity level instance * Routines for retrieving the node corresponding to a power domain instance
* in the mpidr. The first one uses binary search to find the node corresponding * in the mpidr. The first one uses binary search to find the node corresponding
* to the mpidr (key) at a particular affinity level. The second routine decides * to the mpidr (key) at a particular power level. The second routine decides
* extents of the binary search at each affinity level. * extents of the binary search at each power level.
******************************************************************************/ ******************************************************************************/
static int psci_aff_map_get_idx(unsigned long key, static int psci_pwr_domain_map_get_idx(unsigned long key,
int min_idx, int min_idx,
int max_idx) int max_idx)
{ {
...@@ -85,80 +85,80 @@ static int psci_aff_map_get_idx(unsigned long key, ...@@ -85,80 +85,80 @@ static int psci_aff_map_get_idx(unsigned long key,
/* /*
* Make sure we are within array limits. * Make sure we are within array limits.
*/ */
assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS); assert(min_idx >= 0 && max_idx < PSCI_NUM_PWR_DOMAINS);
/* /*
* Bisect the array around 'mid' and then recurse into the array chunk * Bisect the array around 'mid' and then recurse into the array chunk
* where the key is likely to be found. The mpidrs in each node in the * where the key is likely to be found. The mpidrs in each node in the
* 'psci_aff_map' for a given affinity level are stored in an ascending * 'psci_pwr_domain_map' for a given power level are stored in an
* order which makes the binary search possible. * ascending order which makes the binary search possible.
*/ */
mid = min_idx + ((max_idx - min_idx) >> 1); /* Divide by 2 */ mid = min_idx + ((max_idx - min_idx) >> 1); /* Divide by 2 */
if (psci_aff_map[mid].mpidr > key) if (psci_pwr_domain_map[mid].mpidr > key)
return psci_aff_map_get_idx(key, min_idx, mid - 1); return psci_pwr_domain_map_get_idx(key, min_idx, mid - 1);
else if (psci_aff_map[mid].mpidr < key) else if (psci_pwr_domain_map[mid].mpidr < key)
return psci_aff_map_get_idx(key, mid + 1, max_idx); return psci_pwr_domain_map_get_idx(key, mid + 1, max_idx);
else else
return mid; return mid;
} }
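psci_pwr_domain_map_get_idx is a standard binary search that recurses into whichever half of the per-level slice can still contain the key. The standalone sketch below shows the same search iteratively over a plain sorted array of mpidr-like keys; the names and the not-found return value are illustrative rather than the firmware's own.

/*
 * Return the index of 'key' within keys[min_idx..max_idx], or -1 if it
 * is not present. The slice must be sorted in ascending order, exactly
 * as each per-level slice of the flattened power domain map is.
 */
static int toy_map_get_idx(const unsigned long *keys, unsigned long key,
			   int min_idx, int max_idx)
{
	while (min_idx <= max_idx) {
		int mid = min_idx + ((max_idx - min_idx) >> 1);

		if (keys[mid] > key)
			max_idx = mid - 1;
		else if (keys[mid] < key)
			min_idx = mid + 1;
		else
			return mid;
	}
	return -1;
}

In the firmware the min_idx/max_idx pair comes from psci_pwr_lvl_limits for the level being searched, which is what confines the lookup to a single power level of the flattened array.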
aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl) pwr_map_node_t *psci_get_pwr_map_node(unsigned long mpidr, int pwr_lvl)
{ {
int rc; int rc;
if (aff_lvl > PLATFORM_MAX_AFFLVL) if (pwr_lvl > PLAT_MAX_PWR_LVL)
return NULL; return NULL;
/* Right shift the mpidr to the required affinity level */ /* Right shift the mpidr to the required power level */
mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl); mpidr = mpidr_mask_lower_afflvls(mpidr, pwr_lvl);
rc = psci_aff_map_get_idx(mpidr, rc = psci_pwr_domain_map_get_idx(mpidr,
psci_aff_limits[aff_lvl].min, psci_pwr_lvl_limits[pwr_lvl].min,
psci_aff_limits[aff_lvl].max); psci_pwr_lvl_limits[pwr_lvl].max);
if (rc >= 0) if (rc >= 0)
return &psci_aff_map[rc]; return &psci_pwr_domain_map[rc];
else else
return NULL; return NULL;
} }
/******************************************************************************* /*******************************************************************************
* This function populates an array with nodes corresponding to a given range of * This function populates an array with nodes corresponding to a given range of
* affinity levels in an mpidr. It returns successfully only when the affinity * power levels in an mpidr. It returns successfully only when the power
* levels are correct, the mpidr is valid i.e. no affinity level is absent from * levels are correct, the mpidr is valid i.e. no power level is absent from
* the topology tree & the affinity instance at level 0 is not absent. * the topology tree & the power domain instance at level 0 is not absent.
******************************************************************************/ ******************************************************************************/
int psci_get_aff_map_nodes(unsigned long mpidr, int psci_get_pwr_map_nodes(unsigned long mpidr,
int start_afflvl, int start_pwrlvl,
int end_afflvl, int end_pwrlvl,
aff_map_node_t *mpidr_nodes[]) pwr_map_node_t *mpidr_nodes[])
{ {
int rc = PSCI_E_INVALID_PARAMS, level; int rc = PSCI_E_INVALID_PARAMS, level;
aff_map_node_t *node; pwr_map_node_t *node;
rc = psci_check_afflvl_range(start_afflvl, end_afflvl); rc = psci_check_pwrlvl_range(start_pwrlvl, end_pwrlvl);
if (rc != PSCI_E_SUCCESS) if (rc != PSCI_E_SUCCESS)
return rc; return rc;
for (level = start_afflvl; level <= end_afflvl; level++) { for (level = start_pwrlvl; level <= end_pwrlvl; level++) {
/* /*
* Grab the node for each affinity level. No affinity level * Grab the node for each power level. No power level
* can be missing as that would mean that the topology tree * can be missing as that would mean that the topology tree
* is corrupted. * is corrupted.
*/ */
node = psci_get_aff_map_node(mpidr, level); node = psci_get_pwr_map_node(mpidr, level);
if (node == NULL) { if (node == NULL) {
rc = PSCI_E_INVALID_PARAMS; rc = PSCI_E_INVALID_PARAMS;
break; break;
} }
/* /*
* Skip absent affinity levels unless it's affinity level 0. * Skip absent power levels unless it's power level 0.
* An absent cpu means that the mpidr is invalid. Save the * An absent cpu means that the mpidr is invalid. Save the
* pointer to the node for the present affinity level * pointer to the node for the present power level
*/ */
if (!(node->state & PSCI_AFF_PRESENT)) { if (!(node->state & PSCI_PWR_DOMAIN_PRESENT)) {
if (level == MPIDR_AFFLVL0) { if (level == MPIDR_AFFLVL0) {
rc = PSCI_E_INVALID_PARAMS; rc = PSCI_E_INVALID_PARAMS;
break; break;
...@@ -173,39 +173,38 @@ int psci_get_aff_map_nodes(unsigned long mpidr, ...@@ -173,39 +173,38 @@ int psci_get_aff_map_nodes(unsigned long mpidr,
} }
/******************************************************************************* /*******************************************************************************
* Function which initializes the 'aff_map_node' corresponding to an affinity * Function which initializes the 'pwr_map_node' corresponding to a power
* level instance. Each node has a unique mpidr, level and bakery lock. The data * domain instance. Each node has a unique mpidr, level and bakery lock.
* field is opaque and holds affinity level specific data e.g. for affinity
* level 0 it contains the index into arrays that hold the secure/non-secure
* state for a cpu that's been turned on/off
******************************************************************************/ ******************************************************************************/
static void psci_init_aff_map_node(unsigned long mpidr, static void psci_init_pwr_map_node(unsigned long mpidr,
int level, int level,
unsigned int idx) unsigned int idx)
{ {
unsigned char state; unsigned char state;
uint32_t linear_id; uint32_t linear_id;
psci_aff_map[idx].mpidr = mpidr; psci_pwr_domain_map[idx].mpidr = mpidr;
psci_aff_map[idx].level = level; psci_pwr_domain_map[idx].level = level;
psci_lock_init(psci_aff_map, idx); psci_lock_init(psci_pwr_domain_map, idx);
/* /*
* If an affinity instance is present then mark it as OFF to begin with. * If a power domain instance is present then mark it as OFF
* to begin with.
*/ */
state = plat_get_aff_state(level, mpidr); state = plat_get_pwr_domain_state(level, mpidr);
psci_aff_map[idx].state = state; psci_pwr_domain_map[idx].state = state;
if (level == MPIDR_AFFLVL0) { if (level == MPIDR_AFFLVL0) {
/* /*
* Mark the cpu as OFF. Higher affinity level reference counts * Mark the cpu as OFF. Higher power level reference counts
* have already been memset to 0 * have already been memset to 0
*/ */
if (state & PSCI_AFF_PRESENT) if (state & PSCI_PWR_DOMAIN_PRESENT)
psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF); psci_set_state(&psci_pwr_domain_map[idx],
PSCI_STATE_OFF);
/* /*
* Associate a non-secure context with this affinity * Associate a non-secure context with this power
* instance through the context management library. * instance through the context management library.
*/ */
linear_id = platform_get_core_pos(mpidr); linear_id = platform_get_core_pos(mpidr);
...@@ -228,65 +227,68 @@ static void psci_init_aff_map_node(unsigned long mpidr, ...@@ -228,65 +227,68 @@ static void psci_init_aff_map_node(unsigned long mpidr,
/******************************************************************************* /*******************************************************************************
* Core routine used by the Breadth-First-Search algorithm to populate the * Core routine used by the Breadth-First-Search algorithm to populate the
* affinity tree. Each level in the tree corresponds to an affinity level. This * power domain tree. Each level in the tree corresponds to a power level. This
* routine's aim is to traverse to the target affinity level and populate nodes * routine's aim is to traverse to the target power level and populate nodes
* in the 'psci_aff_map' for all the siblings at that level. It uses the current * in the 'psci_pwr_domain_map' for all the siblings at that level. It uses the
* affinity level to keep track of how many levels from the root of the tree * current power level to keep track of how many levels from the root of the
* have been traversed. If the current affinity level != target affinity level, * tree have been traversed. If the current power level != target power level,
* then the platform is asked to return the number of children that each * then the platform is asked to return the number of children that each
* affinity instance has at the current affinity level. Traversal is then done * power domain instance has at the current power level. Traversal is then done
* for each child at the next lower level i.e. current affinity level - 1. * for each child at the next lower level i.e. current power level - 1.
* *
* CAUTION: This routine assumes that affinity instance ids are allocated in a * CAUTION: This routine assumes that power domain instance ids are allocated
* monotonically increasing manner at each affinity level in a mpidr starting * in a monotonically increasing manner at each power level in a mpidr starting
* from 0. If the platform breaks this assumption then this code will have to * from 0. If the platform breaks this assumption then this code will have to
* be reworked accordingly. * be reworked accordingly.
******************************************************************************/ ******************************************************************************/
static unsigned int psci_init_aff_map(unsigned long mpidr, static unsigned int psci_init_pwr_map(unsigned long mpidr,
unsigned int affmap_idx, unsigned int pwrmap_idx,
int cur_afflvl, int cur_pwrlvl,
int tgt_afflvl) int tgt_pwrlvl)
{ {
unsigned int ctr, aff_count; unsigned int ctr, pwr_inst_count;
assert(cur_afflvl >= tgt_afflvl); assert(cur_pwrlvl >= tgt_pwrlvl);
/* /*
* Find the number of siblings at the current affinity level & * Find the number of siblings at the current power level &
* assert if there are none 'cause then we have been invoked with * assert if there are none 'cause then we have been invoked with
* an invalid mpidr. * an invalid mpidr.
*/ */
aff_count = plat_get_aff_count(cur_afflvl, mpidr); pwr_inst_count = plat_get_pwr_domain_count(cur_pwrlvl, mpidr);
assert(aff_count); assert(pwr_inst_count);
if (tgt_afflvl < cur_afflvl) { if (tgt_pwrlvl < cur_pwrlvl) {
for (ctr = 0; ctr < aff_count; ctr++) { for (ctr = 0; ctr < pwr_inst_count; ctr++) {
mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl); mpidr = mpidr_set_pwr_domain_inst(mpidr, ctr,
affmap_idx = psci_init_aff_map(mpidr, cur_pwrlvl);
affmap_idx, pwrmap_idx = psci_init_pwr_map(mpidr,
cur_afflvl - 1, pwrmap_idx,
tgt_afflvl); cur_pwrlvl - 1,
tgt_pwrlvl);
} }
} else { } else {
for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) { for (ctr = 0; ctr < pwr_inst_count; ctr++, pwrmap_idx++) {
mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl); mpidr = mpidr_set_pwr_domain_inst(mpidr, ctr,
psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx); cur_pwrlvl);
psci_init_pwr_map_node(mpidr, cur_pwrlvl, pwrmap_idx);
} }
/* affmap_idx is 1 greater than the max index of cur_afflvl */ /* pwrmap_idx is 1 greater than the max index of cur_pwrlvl */
psci_aff_limits[cur_afflvl].max = affmap_idx - 1; psci_pwr_lvl_limits[cur_pwrlvl].max = pwrmap_idx - 1;
} }
return affmap_idx; return pwrmap_idx;
} }
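For concreteness, the breadth-first construction just above can be exercised on the two-cluster, four-cpus-per-cluster example used in the comment that follows. The program below is a self-contained approximation (it tracks only a parent instance number instead of composing a full mpidr, and the platform query is a hard-coded stub), but it reproduces the index layout and the per-level max indices.

#include <stdio.h>

#define TOY_MAX_LVL	1	/* level 1 = cluster, level 0 = cpu */
#define TOY_CLUSTERS	2
#define TOY_CPUS	4	/* cpus per cluster */

static int toy_max_idx[TOY_MAX_LVL + 1];

/* Hypothetical stand-in for the platform's per-level instance count query. */
static unsigned int toy_count(int lvl, unsigned int parent)
{
	(void)parent;
	return (lvl == TOY_MAX_LVL) ? TOY_CLUSTERS : TOY_CPUS;
}

/* Mirror of the recursive descent: create nodes only at tgt_lvl,
 * otherwise recurse one level down for every instance found. */
static unsigned int toy_init_map(unsigned int parent, unsigned int idx,
				 int cur_lvl, int tgt_lvl)
{
	unsigned int count = toy_count(cur_lvl, parent);

	if (tgt_lvl < cur_lvl) {
		for (unsigned int c = 0; c < count; c++)
			idx = toy_init_map(c, idx, cur_lvl - 1, tgt_lvl);
	} else {
		for (unsigned int c = 0; c < count; c++, idx++)
			printf("map[%u]: level %d, instance %u under parent %u\n",
			       idx, cur_lvl, c, parent);
		toy_max_idx[cur_lvl] = (int)idx - 1;
	}
	return idx;
}

int main(void)
{
	unsigned int idx = 0;

	/* One pass per target level, highest level first, index carried over. */
	for (int lvl = TOY_MAX_LVL; lvl >= 0; lvl--)
		idx = toy_init_map(0, idx, TOY_MAX_LVL, lvl);

	printf("level 1 occupies [0..%d], level 0 occupies [%d..%d]\n",
	       toy_max_idx[1], toy_max_idx[1] + 1, toy_max_idx[0]);
	return 0;
}

Its output lists the ten nodes in creation order and ends with "level 1 occupies [0..1], level 0 occupies [2..9]", matching the diagram in the comment below and the bounds that psci_setup() later derives for psci_pwr_lvl_limits.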
/******************************************************************************* /*******************************************************************************
* This function initializes the topology tree by querying the platform. To do * This function initializes the topology tree by querying the platform. To do
* so, its helper routines implement a Breadth-First-Search. At each affinity * so, its helper routines implement a Breadth-First-Search. At each power
* level the platform conveys the number of affinity instances that exist i.e. * level the platform conveys the number of power domain instances that exist
* the affinity count. The algorithm populates the psci_aff_map recursively * i.e. the power instance count. The algorithm populates the
* using this information. On a platform that implements two clusters of 4 cpus * psci_pwr_domain_map* recursively using this information. On a platform that
* each, the populated aff_map_array would look like this: * implements two clusters of 4 cpus each, the populated pwr_map_array would
* look like this:
* *
* <- cpus cluster0 -><- cpus cluster1 -> * <- cpus cluster0 -><- cpus cluster1 ->
* --------------------------------------------------- * ---------------------------------------------------
...@@ -298,71 +300,72 @@ static unsigned int psci_init_aff_map(unsigned long mpidr, ...@@ -298,71 +300,72 @@ static unsigned int psci_init_aff_map(unsigned long mpidr,
* *
* The first 2 entries are of the cluster nodes. The next 4 entries are of cpus * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
* within cluster 0. The last 4 entries are of cpus within cluster 1. * within cluster 0. The last 4 entries are of cpus within cluster 1.
* The 'psci_aff_limits' array contains the max & min index of each affinity * The 'psci_pwr_lvl_limits' array contains the max & min index of each power
* level within the 'psci_aff_map' array. This allows restricting search of a * level within the 'psci_pwr_domain_map' array. This allows restricting search
* node at an affinity level between the indices in the limits array. * of a node at a power level between the indices in the limits array.
******************************************************************************/ ******************************************************************************/
int32_t psci_setup(void) int32_t psci_setup(void)
{ {
unsigned long mpidr = read_mpidr(); unsigned long mpidr = read_mpidr();
int afflvl, affmap_idx, max_afflvl; int pwrlvl, pwrmap_idx, max_pwrlvl;
aff_map_node_t *node; pwr_map_node_t *node;
psci_plat_pm_ops = NULL; psci_plat_pm_ops = NULL;
/* Find out the maximum affinity level that the platform implements */ /* Find out the maximum power level that the platform implements */
max_afflvl = PLATFORM_MAX_AFFLVL; max_pwrlvl = PLAT_MAX_PWR_LVL;
assert(max_afflvl <= MPIDR_MAX_AFFLVL); assert(max_pwrlvl <= MPIDR_MAX_AFFLVL);
/* /*
* This call traverses the topology tree with help from the platform and * This call traverses the topology tree with help from the platform and
* populates the affinity map using a breadth-first-search recursively. * populates the power map using a breadth-first-search recursively.
* We assume that the platform allocates affinity instance ids from 0 * We assume that the platform allocates power domain instance ids from
* onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0 * 0 onwards at each power level in the mpidr. FIRST_MPIDR = 0.0.0.0
*/ */
affmap_idx = 0; pwrmap_idx = 0;
for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) { for (pwrlvl = max_pwrlvl; pwrlvl >= MPIDR_AFFLVL0; pwrlvl--) {
affmap_idx = psci_init_aff_map(FIRST_MPIDR, pwrmap_idx = psci_init_pwr_map(FIRST_MPIDR,
affmap_idx, pwrmap_idx,
max_afflvl, max_pwrlvl,
afflvl); pwrlvl);
} }
#if !USE_COHERENT_MEM #if !USE_COHERENT_MEM
/* /*
* The psci_aff_map only needs flushing when it's not allocated in * The psci_pwr_domain_map only needs flushing when it's not allocated
* coherent memory. * in coherent memory.
*/ */
flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map)); flush_dcache_range((uint64_t) &psci_pwr_domain_map,
sizeof(psci_pwr_domain_map));
#endif #endif
/* /*
* Set the bounds for the affinity counts of each level in the map. Also * Set the bounds for the number of instances at each level in the map. Also
* flush out the entire array so that it's visible to subsequent power * flush out the entire array so that it's visible to subsequent power
* management operations. The 'psci_aff_limits' array is allocated in * management operations. The 'psci_pwr_lvl_limits' array is allocated
* normal memory. It will be accessed when the mmu is off e.g. after * in normal memory. It will be accessed when the mmu is off e.g. after
* reset. Hence it needs to be flushed. * reset. Hence it needs to be flushed.
*/ */
for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) { for (pwrlvl = MPIDR_AFFLVL0; pwrlvl < max_pwrlvl; pwrlvl++) {
psci_aff_limits[afflvl].min = psci_pwr_lvl_limits[pwrlvl].min =
psci_aff_limits[afflvl + 1].max + 1; psci_pwr_lvl_limits[pwrlvl + 1].max + 1;
} }
flush_dcache_range((unsigned long) psci_aff_limits, flush_dcache_range((unsigned long) psci_pwr_lvl_limits,
sizeof(psci_aff_limits)); sizeof(psci_pwr_lvl_limits));
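As a worked example (using the two-cluster, four-cpus-per-cluster layout from the psci_setup() comment above, not any particular platform): map construction has already set max = 1 for the cluster level and max = 9 for the cpu level, so this loop computes min(cpu level) = max(cluster level) + 1 = 2, while the top level keeps its zero-initialised min of 0. The limits therefore restrict cluster lookups to indices [0..1] and cpu lookups to [2..9].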
/* /*
* Mark the affinity instances in our mpidr as ON. No need to lock as * Mark the power domain instances in our mpidr as ON. No need to lock
* this is the primary cpu. * as this is the primary cpu.
*/ */
mpidr &= MPIDR_AFFINITY_MASK; mpidr &= MPIDR_AFFINITY_MASK;
for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) { for (pwrlvl = MPIDR_AFFLVL0; pwrlvl <= max_pwrlvl; pwrlvl++) {
node = psci_get_aff_map_node(mpidr, afflvl); node = psci_get_pwr_map_node(mpidr, pwrlvl);
assert(node); assert(node);
/* Mark each present node as ON. */ /* Mark each present node as ON. */
if (node->state & PSCI_AFF_PRESENT) if (node->state & PSCI_PWR_DOMAIN_PRESENT)
psci_set_state(node, PSCI_STATE_ON); psci_set_state(node, PSCI_STATE_ON);
} }
...@@ -372,12 +375,13 @@ int32_t psci_setup(void) ...@@ -372,12 +375,13 @@ int32_t psci_setup(void)
/* Initialize the psci capability */ /* Initialize the psci capability */
psci_caps = PSCI_GENERIC_CAP; psci_caps = PSCI_GENERIC_CAP;
if (psci_plat_pm_ops->affinst_off) if (psci_plat_pm_ops->pwr_domain_off)
psci_caps |= define_psci_cap(PSCI_CPU_OFF); psci_caps |= define_psci_cap(PSCI_CPU_OFF);
if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish) if (psci_plat_pm_ops->pwr_domain_on &&
psci_plat_pm_ops->pwr_domain_on_finish)
psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64); psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
if (psci_plat_pm_ops->affinst_suspend && if (psci_plat_pm_ops->pwr_domain_suspend &&
psci_plat_pm_ops->affinst_suspend_finish) { psci_plat_pm_ops->pwr_domain_suspend_finish) {
psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64); psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
if (psci_plat_pm_ops->get_sys_suspend_power_state) if (psci_plat_pm_ops->get_sys_suspend_power_state)
psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64); psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
......
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -52,18 +52,18 @@ void psci_set_suspend_power_state(unsigned int power_state) ...@@ -52,18 +52,18 @@ void psci_set_suspend_power_state(unsigned int power_state)
} }
/******************************************************************************* /*******************************************************************************
* This function gets the affinity level till which the current cpu could be * This function gets the power level till which the current cpu could be
* powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
* power state is invalid. * power state is invalid.
******************************************************************************/ ******************************************************************************/
int psci_get_suspend_afflvl(void) int psci_get_suspend_pwrlvl(void)
{ {
unsigned int power_state; unsigned int power_state;
power_state = get_cpu_data(psci_svc_cpu_data.power_state); power_state = get_cpu_data(psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ? return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_afflvl(power_state)); power_state : psci_get_pstate_pwrlvl(power_state));
} }
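The value handed back here is the power-level field of the saved power_state parameter, or PSCI_INVALID_DATA when nothing valid was saved. As a reminder of the PSCI 0.2 power_state layout (state id in bits [15:0], state type in bit [16], target power level in bits [25:24]), a minimal, self-contained extraction is sketched below; the names are local to the sketch rather than the firmware's macros.

#include <assert.h>

/* Local names for the PSCI 0.2 power_state layout used in this sketch. */
#define TOY_PSTATE_PWR_LVL_SHIFT	24
#define TOY_PSTATE_PWR_LVL_MASK		0x3

static unsigned int toy_pstate_pwrlvl(unsigned int power_state)
{
	return (power_state >> TOY_PSTATE_PWR_LVL_SHIFT) &
		TOY_PSTATE_PWR_LVL_MASK;
}

int main(void)
{
	/* 0x01000000: power level 1, standby state type, state id 0. */
	assert(toy_pstate_pwrlvl(0x01000000) == 1);
	return 0;
}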
/******************************************************************************* /*******************************************************************************
...@@ -99,54 +99,54 @@ int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) ...@@ -99,54 +99,54 @@ int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
/******************************************************************************* /*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution. * Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with suspending the cpu, higher affinity levels * It is assumed that along with suspending the cpu power domain, power domains
* until the target affinity level will be suspended as well. It finds the * at higher levels up to the target power level will be suspended as well.
* highest level to be suspended by traversing the node information and then * It finds the highest level where a domain has to be suspended by traversing
* performs generic, architectural, platform setup and state management * the node information and then performs generic, architectural, platform
* required to suspend that affinity level and affinity levels below it. * setup and state management required to suspend that power domain and domains
* e.g. For a cpu that's to be suspended, it could mean programming the * below it. e.g. For a cpu that's to be suspended, it could mean programming
* power controller whereas for a cluster that's to be suspended, it will call * the power controller whereas for a cluster that's to be suspended, it will
* the platform specific code which will disable coherency at the interconnect * call the platform specific code which will disable coherency at the
* level if the cpu is the last in the cluster and also the program the power * interconnect level if the cpu is the last in the cluster and also the
* controller. * program the power controller.
* *
* All the required parameter checks are performed at the beginning and after * All the required parameter checks are performed at the beginning and after
* the state transition has been done, no further error is expected and it is * the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point. * not possible to undo any of the actions taken beyond that point.
******************************************************************************/ ******************************************************************************/
void psci_afflvl_suspend(entry_point_info_t *ep, void psci_cpu_suspend_start(entry_point_info_t *ep,
int end_afflvl) int end_pwrlvl)
{ {
int skip_wfi = 0; int skip_wfi = 0;
mpidr_aff_map_nodes_t mpidr_nodes; mpidr_pwr_map_nodes_t mpidr_nodes;
unsigned int max_phys_off_afflvl; unsigned int max_phys_off_pwrlvl;
unsigned long psci_entrypoint; unsigned long psci_entrypoint;
/* /*
* This function must only be called on platforms where the * This function must only be called on platforms where the
* CPU_SUSPEND platform hooks have been implemented. * CPU_SUSPEND platform hooks have been implemented.
*/ */
assert(psci_plat_pm_ops->affinst_suspend && assert(psci_plat_pm_ops->pwr_domain_suspend &&
psci_plat_pm_ops->affinst_suspend_finish); psci_plat_pm_ops->pwr_domain_suspend_finish);
/* /*
* Collect the pointers to the nodes in the topology tree for * Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does * each power domain instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity * not return successfully then either the mpidr or the power
* levels are incorrect. Either way, this is an internal TF error * levels are incorrect. Either way, this is an internal TF error
* therefore assert. * therefore assert.
*/ */
if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK, if (psci_get_pwr_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
MPIDR_AFFLVL0, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS) MPIDR_AFFLVL0, end_pwrlvl, mpidr_nodes) != PSCI_E_SUCCESS)
assert(0); assert(0);
/* /*
* This function acquires the lock corresponding to each affinity * This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology * level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely. * is snapshot and state management can be done safely.
*/ */
psci_acquire_afflvl_locks(MPIDR_AFFLVL0, psci_acquire_pwr_domain_locks(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
mpidr_nodes); mpidr_nodes);
/* /*
...@@ -168,19 +168,19 @@ void psci_afflvl_suspend(entry_point_info_t *ep, ...@@ -168,19 +168,19 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
psci_spd_pm->svc_suspend(0); psci_spd_pm->svc_suspend(0);
/* /*
* This function updates the state of each affinity instance * This function updates the state of each power domain instance
* corresponding to the mpidr in the range of affinity levels * corresponding to the mpidr in the range of power levels
* specified. * specified.
*/ */
psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0, psci_do_state_coordination(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
mpidr_nodes, mpidr_nodes,
PSCI_STATE_SUSPEND); PSCI_STATE_SUSPEND);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0, max_phys_off_pwrlvl = psci_find_max_phys_off_pwrlvl(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
mpidr_nodes); mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA); assert(max_phys_off_pwrlvl != PSCI_INVALID_DATA);
/* /*
* Store the re-entry information for the non-secure world. * Store the re-entry information for the non-secure world.
...@@ -188,13 +188,13 @@ void psci_afflvl_suspend(entry_point_info_t *ep, ...@@ -188,13 +188,13 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
cm_init_context(read_mpidr_el1(), ep); cm_init_context(read_mpidr_el1(), ep);
/* Set the secure world (EL3) re-entry point after BL1 */ /* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; psci_entrypoint = (unsigned long) psci_cpu_suspend_finish_entry;
/* /*
* Arch. management. Perform the necessary steps to flush all * Arch. management. Perform the necessary steps to flush all
* cpu caches. * cpu caches.
*/ */
psci_do_pwrdown_cache_maintenance(max_phys_off_afflvl); psci_do_pwrdown_cache_maintenance(max_phys_off_pwrlvl);
/* /*
* Plat. management: Allow the platform to perform the * Plat. management: Allow the platform to perform the
...@@ -202,31 +202,31 @@ void psci_afflvl_suspend(entry_point_info_t *ep, ...@@ -202,31 +202,31 @@ void psci_afflvl_suspend(entry_point_info_t *ep,
* platform defined mailbox with the psci entrypoint, * platform defined mailbox with the psci entrypoint,
* program the power controller etc. * program the power controller etc.
*/ */
psci_plat_pm_ops->affinst_suspend(psci_entrypoint, psci_plat_pm_ops->pwr_domain_suspend(psci_entrypoint,
max_phys_off_afflvl); max_phys_off_pwrlvl);
exit: exit:
/* /*
* Release the locks corresponding to each affinity level in the * Release the locks corresponding to each power level in the
* reverse order to which they were acquired. * reverse order to which they were acquired.
*/ */
psci_release_afflvl_locks(MPIDR_AFFLVL0, psci_release_pwr_domain_locks(MPIDR_AFFLVL0,
end_afflvl, end_pwrlvl,
mpidr_nodes); mpidr_nodes);
if (!skip_wfi) if (!skip_wfi)
psci_power_down_wfi(); psci_power_down_wfi();
} }
/******************************************************************************* /*******************************************************************************
* The following functions finish an earlier affinity suspend request. They * The following functions finish an earlier suspend request. They
* are called by the common finisher routine in psci_common.c. * are called by the common finisher routine in psci_common.c.
******************************************************************************/ ******************************************************************************/
void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl) void psci_cpu_suspend_finish(pwr_map_node_t *node[], int pwrlvl)
{ {
int32_t suspend_level; int32_t suspend_level;
uint64_t counter_freq; uint64_t counter_freq;
assert(node[afflvl]->level == afflvl); assert(node[pwrlvl]->level == pwrlvl);
/* Ensure we have been woken up from a suspended state */ /* Ensure we have been woken up from a suspended state */
assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_SUSPEND); assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_SUSPEND);
...@@ -238,7 +238,7 @@ void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl) ...@@ -238,7 +238,7 @@ void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl)
* wrong then assert as there is no way to recover from this * wrong then assert as there is no way to recover from this
* situation. * situation.
*/ */
psci_plat_pm_ops->affinst_suspend_finish(afflvl); psci_plat_pm_ops->pwr_domain_suspend_finish(pwrlvl);
/* /*
* Arch. management: Enable the data cache, manage stack memory and * Arch. management: Enable the data cache, manage stack memory and
...@@ -257,7 +257,7 @@ void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl) ...@@ -257,7 +257,7 @@ void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl)
* error, it's expected to assert within * error, it's expected to assert within
*/ */
if (psci_spd_pm && psci_spd_pm->svc_suspend) { if (psci_spd_pm && psci_spd_pm->svc_suspend) {
suspend_level = psci_get_suspend_afflvl(); suspend_level = psci_get_suspend_pwrlvl();
assert (suspend_level != PSCI_INVALID_DATA); assert (suspend_level != PSCI_INVALID_DATA);
psci_spd_pm->svc_suspend_finish(suspend_level); psci_spd_pm->svc_suspend_finish(suspend_level);
} }
......
/* /*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
void psci_system_off(void) void psci_system_off(void)
{ {
psci_print_affinity_map(); psci_print_power_domain_map();
assert(psci_plat_pm_ops->system_off); assert(psci_plat_pm_ops->system_off);
...@@ -54,7 +54,7 @@ void psci_system_off(void) ...@@ -54,7 +54,7 @@ void psci_system_off(void)
void psci_system_reset(void) void psci_system_reset(void)
{ {
psci_print_affinity_map(); psci_print_power_domain_map();
assert(psci_plat_pm_ops->system_reset); assert(psci_plat_pm_ops->system_reset);
......