Commit 432b9905 authored by Achin Gupta's avatar Achin Gupta
Browse files

Merge pull request #361 from achingupta/for_sm/psci_proto_v5

For sm/psci proto v5
parents 9caf7e36 9d070b99
...@@ -30,35 +30,49 @@ ...@@ -30,35 +30,49 @@
#include <arch.h> #include <arch.h>
#include <psci.h> #include <psci.h>
#include <plat_arm.h>
#include <platform_def.h> #include <platform_def.h>
/* #define get_arm_cluster_core_count(mpidr)\
* Weak definitions use fixed topology. Strong definitions could make topology (((mpidr) & 0x100) ? PLAT_ARM_CLUSTER1_CORE_COUNT :\
* configurable PLAT_ARM_CLUSTER0_CORE_COUNT)
*/
#pragma weak plat_get_aff_count
#pragma weak plat_get_aff_state
#pragma weak plat_arm_topology_setup
unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr) /* The power domain tree descriptor which need to be exported by ARM platforms */
{ extern const unsigned char arm_power_domain_tree_desc[];
/* Report 1 (absent) instance at levels higher that the cluster level */
if (aff_lvl > MPIDR_AFFLVL1)
return 1;
if (aff_lvl == MPIDR_AFFLVL1)
return ARM_CLUSTER_COUNT;
return mpidr & 0x100 ? PLAT_ARM_CLUSTER1_CORE_COUNT :
PLAT_ARM_CLUSTER0_CORE_COUNT;
}
unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr) /*******************************************************************************
* This function returns the ARM default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{ {
return aff_lvl <= MPIDR_AFFLVL1 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT; return arm_power_domain_tree_desc;
} }
void plat_arm_topology_setup(void) /*******************************************************************************
* This function validates an MPIDR by checking whether it falls within the
* acceptable bounds. An error code (-1) is returned if an incorrect mpidr
* is passed.
******************************************************************************/
int arm_check_mpidr(u_register_t mpidr)
{ {
unsigned int cluster_id, cpu_id;
mpidr &= MPIDR_AFFINITY_MASK;
if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
return -1;
cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
if (cluster_id >= ARM_CLUSTER_COUNT)
return -1;
/* Validate cpu_id by checking whether it represents a CPU in
one of the two clusters present on the platform. */
if (cpu_id >= get_arm_cluster_core_count(mpidr))
return -1;
return 0;
} }
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
# TSP source files common to ARM standard platforms # TSP source files common to ARM standard platforms
BL32_SOURCES += drivers/arm/gic/arm_gic.c \ BL32_SOURCES += drivers/arm/gic/arm_gic.c \
drivers/arm/gic/gic_v2.c \ drivers/arm/gic/gic_v2.c \
plat/arm/common/arm_topology.c \
plat/arm/common/tsp/arm_tsp_setup.c \ plat/arm/common/tsp/arm_tsp_setup.c \
plat/common/aarch64/platform_mp_stack.S \ plat/common/aarch64/platform_mp_stack.S \
plat/common/plat_gic.c plat/common/plat_gic.c
...@@ -33,11 +33,9 @@ ...@@ -33,11 +33,9 @@
#include <css_def.h> #include <css_def.h>
.weak plat_secondary_cold_boot_setup .weak plat_secondary_cold_boot_setup
.weak platform_get_entrypoint .weak plat_get_my_entrypoint
.weak platform_mem_init .globl plat_arm_calc_core_pos
.globl platform_get_core_pos .weak plat_is_my_cpu_primary
.weak platform_is_primary_cpu
/* ----------------------------------------------------- /* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void); * void plat_secondary_cold_boot_setup (void);
...@@ -54,69 +52,56 @@ cb_panic: ...@@ -54,69 +52,56 @@ cb_panic:
b cb_panic b cb_panic
endfunc plat_secondary_cold_boot_setup endfunc plat_secondary_cold_boot_setup
/* ----------------------------------------------------- /* ---------------------------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid); * unsigned long plat_get_my_entrypoint (void);
* *
* Main job of this routine is to distinguish between * Main job of this routine is to distinguish between a cold and a warm
* a cold and warm boot. * boot. On CSS platforms, this distinction is based on the contents of
* On a cold boot the secondaries first wait for the * the Trusted Mailbox. It is initialised to zero by the SCP before the
* platform to be initialized after which they are * AP cores are released from reset. Therefore, a zero mailbox means
* hotplugged in. The primary proceeds to perform the * it's a cold reset.
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
* *
* TODO: Not a good idea to save lr in a temp reg * This functions returns the contents of the mailbox, i.e.:
* ----------------------------------------------------- * - 0 for a cold boot;
* - the warm boot entrypoint for a warm boot.
* ---------------------------------------------------------------------
*/ */
func platform_get_entrypoint func plat_get_my_entrypoint
mov x9, x30 // lr mov_imm x0, TRUSTED_MAILBOX_BASE
bl platform_get_core_pos ldr x0, [x0]
ldr x1, =TRUSTED_MAILBOXES_BASE ret
lsl x0, x0, #TRUSTED_MAILBOX_SHIFT endfunc plat_get_my_entrypoint
ldr x0, [x1, x0]
ret x9
endfunc platform_get_entrypoint
/* /* -----------------------------------------------------------
* Override the default implementation to swap the cluster order. * unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* This is necessary in order to match the format of the boot * Function to calculate the core position by
* information passed by the SCP and read in platform_is_primary_cpu * swapping the cluster order. This is necessary in order to
* below. * match the format of the boot information passed by the SCP
* and read in plat_is_my_cpu_primary below.
* -----------------------------------------------------------
*/ */
func platform_get_core_pos func plat_arm_calc_core_pos
and x1, x0, #MPIDR_CPU_MASK and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK and x0, x0, #MPIDR_CLUSTER_MASK
eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order
add x0, x1, x0, LSR #6 add x0, x1, x0, LSR #6
ret ret
endfunc platform_get_core_pos endfunc plat_arm_calc_core_pos
/* -----------------------------------------------------
* void platform_mem_init(void);
*
* We don't need to carry out any memory initialization
* on CSS platforms. The Secure RAM is accessible straight away.
* -----------------------------------------------------
*/
func platform_mem_init
ret
endfunc platform_mem_init
/* ----------------------------------------------------- /* -----------------------------------------------------
* unsigned int platform_is_primary_cpu (unsigned int mpid); * unsigned int plat_is_my_cpu_primary (void);
* *
* Given the mpidr say whether this cpu is the primary * Find out whether the current cpu is the primary
* cpu (applicable only after a cold boot) * cpu (applicable only after a cold boot)
* ----------------------------------------------------- * -----------------------------------------------------
*/ */
func platform_is_primary_cpu func plat_is_my_cpu_primary
mov x9, x30 mov x9, x30
bl platform_get_core_pos bl plat_my_core_pos
ldr x1, =SCP_BOOT_CFG_ADDR ldr x1, =SCP_BOOT_CFG_ADDR
ldr x1, [x1] ldr x1, [x1]
ubfx x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_BIT_WIDTH ubfx x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_BIT_WIDTH
cmp x0, x1 cmp x0, x1
cset x0, eq cset w0, eq
ret x9 ret x9
endfunc platform_is_primary_cpu endfunc plat_is_my_cpu_primary
...@@ -44,7 +44,8 @@ BL2_SOURCES += plat/arm/css/common/css_bl2_setup.c \ ...@@ -44,7 +44,8 @@ BL2_SOURCES += plat/arm/css/common/css_bl2_setup.c \
BL31_SOURCES += plat/arm/css/common/css_mhu.c \ BL31_SOURCES += plat/arm/css/common/css_mhu.c \
plat/arm/css/common/css_pm.c \ plat/arm/css/common/css_pm.c \
plat/arm/css/common/css_scpi.c plat/arm/css/common/css_scpi.c \
plat/arm/css/common/css_topology.c
ifneq (${RESET_TO_BL31},0) ifneq (${RESET_TO_BL31},0)
......
...@@ -41,42 +41,49 @@ ...@@ -41,42 +41,49 @@
#include <psci.h> #include <psci.h>
#include "css_scpi.h" #include "css_scpi.h"
#if ARM_RECOM_STATE_ID_ENC
/*
* The table storing the valid idle power states. Ensure that the
* array entries are populated in ascending order of state-id to
* enable us to use binary search during power state validation.
* The table must be terminated by a NULL entry.
*/
const unsigned int arm_pm_idle_states[] = {
/* State-id - 0x01 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RET,
ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
/* State-id - 0x02 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
/* State-id - 0x22 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
0,
};
#endif
/******************************************************************************* /*******************************************************************************
* Private function to program the mailbox for a cpu before it is released * Private function to program the mailbox for a cpu before it is released
* from reset. * from reset.
******************************************************************************/ ******************************************************************************/
static void css_program_mailbox(uint64_t mpidr, uint64_t address) static void css_program_mailbox(uintptr_t address)
{ {
uint64_t linear_id; uintptr_t *mailbox = (void *) TRUSTED_MAILBOX_BASE;
uint64_t mbox; *mailbox = address;
flush_dcache_range((uintptr_t) mailbox, sizeof(*mailbox));
linear_id = platform_get_core_pos(mpidr);
mbox = TRUSTED_MAILBOXES_BASE + (linear_id << TRUSTED_MAILBOX_SHIFT);
*((uint64_t *) mbox) = address;
flush_dcache_range(mbox, sizeof(mbox));
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to be turned on. The * Handler called when a power domain is about to be turned on. The
* level and mpidr determine the affinity instance. * level and mpidr determine the affinity instance.
******************************************************************************/ ******************************************************************************/
int32_t css_affinst_on(uint64_t mpidr, int css_pwr_domain_on(u_register_t mpidr)
uint64_t sec_entrypoint,
uint32_t afflvl,
uint32_t state)
{ {
/* /*
* SCP takes care of powering up higher affinity levels so we * SCP takes care of powering up parent power domains so we
* only need to care about level 0 * only need to care about level 0
*/ */
if (afflvl != MPIDR_AFFLVL0)
return PSCI_E_SUCCESS;
/*
* Setup mailbox with address for CPU entrypoint when it next powers up
*/
css_program_mailbox(mpidr, sec_entrypoint);
scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on, scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
scpi_power_on); scpi_power_on);
...@@ -84,47 +91,37 @@ int32_t css_affinst_on(uint64_t mpidr, ...@@ -84,47 +91,37 @@ int32_t css_affinst_on(uint64_t mpidr,
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance has just been powered on after * Handler called when a power level has just been powered on after
* being turned off earlier. The level and mpidr determine the affinity * being turned off earlier. The target_state encodes the low power state that
* instance. The 'state' arg. allows the platform to decide whether the cluster * each level has woken up from.
* was turned off prior to wakeup and do what's necessary to setup it up
* correctly.
******************************************************************************/ ******************************************************************************/
void css_affinst_on_finish(uint32_t afflvl, uint32_t state) void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{ {
unsigned long mpidr; assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* /*
* Perform the common cluster specific operations i.e enable coherency * Perform the common cluster specific operations i.e enable coherency
* if this cluster was off. * if this cluster was off.
*/ */
if (afflvl != MPIDR_AFFLVL0) if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr)); ARM_LOCAL_STATE_OFF)
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
/* Enable the gic cpu interface */ /* Enable the gic cpu interface */
arm_gic_cpuif_setup(); arm_gic_cpuif_setup();
/* todo: Is this setup only needed after a cold boot? */ /* todo: Is this setup only needed after a cold boot? */
arm_gic_pcpu_distif_setup(); arm_gic_pcpu_distif_setup();
/* Clear the mailbox for this cpu. */
css_program_mailbox(mpidr, 0);
} }
/******************************************************************************* /*******************************************************************************
* Common function called while turning a cpu off or suspending it. It is called * Common function called while turning a cpu off or suspending it. It is called
* from css_off() or css_suspend() when these functions in turn are called for * from css_off() or css_suspend() when these functions in turn are called for
* the highest affinity level which will be powered down. It performs the * power domain at the highest power level which will be powered down. It
* actions common to the OFF and SUSPEND calls. * performs the actions common to the OFF and SUSPEND calls.
******************************************************************************/ ******************************************************************************/
static void css_power_down_common(uint32_t afflvl) static void css_power_down_common(const psci_power_state_t *target_state)
{ {
uint32_t cluster_state = scpi_power_on; uint32_t cluster_state = scpi_power_on;
...@@ -132,7 +129,8 @@ static void css_power_down_common(uint32_t afflvl) ...@@ -132,7 +129,8 @@ static void css_power_down_common(uint32_t afflvl)
arm_gic_cpuif_deactivate(); arm_gic_cpuif_deactivate();
/* Cluster is to be turned off, so disable coherency */ /* Cluster is to be turned off, so disable coherency */
if (afflvl > MPIDR_AFFLVL0) { if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF) {
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr())); cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
cluster_state = scpi_power_off; cluster_state = scpi_power_off;
} }
...@@ -148,64 +146,55 @@ static void css_power_down_common(uint32_t afflvl) ...@@ -148,64 +146,55 @@ static void css_power_down_common(uint32_t afflvl)
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to be turned off. The * Handler called when a power domain is about to be turned off. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the * target_state encodes the power state that each level should transition to.
* platform to decide whether the cluster is being turned off and take
* appropriate actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It will be wise to do flush a write to the
* global to prevent unpredictable results.
******************************************************************************/ ******************************************************************************/
static void css_affinst_off(uint32_t afflvl, uint32_t state) static void css_pwr_domain_off(const psci_power_state_t *target_state)
{ {
/* Determine if any platform actions need to be executed */ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN) ARM_LOCAL_STATE_OFF);
return;
css_power_down_common(afflvl); css_power_down_common(target_state);
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to be suspended. The * Handler called when a power domain is about to be suspended. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the * target_state encodes the power state that each level should transition to.
* platform to decide whether the cluster is being turned off and take apt
* actions. The 'sec_entrypoint' determines the address in BL3-1 from where
* execution should resume.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It will be wise to do flush a write to the
* global to prevent unpredictable results.
******************************************************************************/ ******************************************************************************/
static void css_affinst_suspend(uint64_t sec_entrypoint, static void css_pwr_domain_suspend(const psci_power_state_t *target_state)
uint32_t afflvl,
uint32_t state)
{ {
/* Determine if any platform actions need to be executed */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
/* /*
* Setup mailbox with address for CPU entrypoint when it next powers up. * Juno has retention only at cpu level. Just return
* as nothing is to be done for retention.
*/ */
css_program_mailbox(read_mpidr_el1(), sec_entrypoint); if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
css_power_down_common(afflvl); css_power_down_common(target_state);
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance has just been powered on after * Handler called when a power domain has just been powered on after
* having been suspended earlier. The level and mpidr determine the affinity * having been suspended earlier. The target_state encodes the low power state
* instance. * that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure * TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher. * context. Need to implement a separate suspend finisher.
******************************************************************************/ ******************************************************************************/
static void css_affinst_suspend_finish(uint32_t afflvl, static void css_pwr_domain_suspend_finish(
uint32_t state) const psci_power_state_t *target_state)
{ {
css_affinst_on_finish(afflvl, state); /*
* Return as nothing is to be done on waking up from retention.
*/
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
css_pwr_domain_on_finish(target_state);
} }
/******************************************************************************* /*******************************************************************************
...@@ -244,12 +233,14 @@ static void __dead2 css_system_reset(void) ...@@ -244,12 +233,14 @@ static void __dead2 css_system_reset(void)
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to enter standby. * Handler called when the CPU power domain is about to enter standby.
******************************************************************************/ ******************************************************************************/
void css_affinst_standby(unsigned int power_state) void css_cpu_standby(plat_local_state_t cpu_state)
{ {
unsigned int scr; unsigned int scr;
assert(cpu_state == ARM_LOCAL_STATE_RET);
scr = read_scr_el3(); scr = read_scr_el3();
/* Enable PhysicalIRQ bit for NS world to wake the CPU */ /* Enable PhysicalIRQ bit for NS world to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT); write_scr_el3(scr | SCR_IRQ_BIT);
...@@ -267,23 +258,28 @@ void css_affinst_standby(unsigned int power_state) ...@@ -267,23 +258,28 @@ void css_affinst_standby(unsigned int power_state)
/******************************************************************************* /*******************************************************************************
* Export the platform handlers to enable psci to invoke them * Export the platform handlers to enable psci to invoke them
******************************************************************************/ ******************************************************************************/
static const plat_pm_ops_t css_ops = { static const plat_psci_ops_t css_ops = {
.affinst_on = css_affinst_on, .pwr_domain_on = css_pwr_domain_on,
.affinst_on_finish = css_affinst_on_finish, .pwr_domain_on_finish = css_pwr_domain_on_finish,
.affinst_off = css_affinst_off, .pwr_domain_off = css_pwr_domain_off,
.affinst_standby = css_affinst_standby, .cpu_standby = css_cpu_standby,
.affinst_suspend = css_affinst_suspend, .pwr_domain_suspend = css_pwr_domain_suspend,
.affinst_suspend_finish = css_affinst_suspend_finish, .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
.system_off = css_system_off, .system_off = css_system_off,
.system_reset = css_system_reset, .system_reset = css_system_reset,
.validate_power_state = arm_validate_power_state .validate_power_state = arm_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint
}; };
/******************************************************************************* /*******************************************************************************
* Export the platform specific power ops. * Export the platform specific psci ops.
******************************************************************************/ ******************************************************************************/
int32_t platform_setup_pm(const plat_pm_ops_t **plat_ops) int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{ {
*plat_ops = &css_ops; *psci_ops = &css_ops;
/* Setup mailbox with entry point. */
css_program_mailbox(sec_entrypoint);
return 0; return 0;
} }
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <plat_arm.h>
/*
 * On ARM platforms, by default the cluster power level is treated as the
 * highest. The first entry in the power domain descriptor specifies the
 * number of cluster power domains i.e. 2.
 */
#define CSS_PWR_DOMAINS_AT_MAX_PWR_LVL	ARM_CLUSTER_COUNT

/*
 * The CSS power domain tree descriptor. The cluster power domains are
 * arranged so that when the PSCI generic code creates the power domain tree,
 * the indices of the CPU power domain nodes it allocates match the linear
 * indices returned by plat_core_pos_by_mpidr() i.e.
 * CLUSTER1 CPUs are allocated indices from 0 to 3 and the higher indices for
 * CLUSTER0 CPUs.
 */
const unsigned char arm_power_domain_tree_desc[] = {
	/* Number of root (cluster-level) nodes */
	CSS_PWR_DOMAINS_AT_MAX_PWR_LVL,
	/* Number of children (CPUs) under the first cluster node */
	PLAT_ARM_CLUSTER1_CORE_COUNT,
	/* Number of children (CPUs) under the second cluster node */
	PLAT_ARM_CLUSTER0_CORE_COUNT
};
/******************************************************************************
 * Part of the critical interface between the PSCI generic layer and the
 * platform: converts an MPIDR into a unique linear core index.
 *
 * Returns the linear index on success, or -1 if the MPIDR does not
 * correspond to a valid CPU on this platform.
 *****************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	/* Reject out-of-range cluster/cpu fields up front */
	if (arm_check_mpidr(mpidr) != 0)
		return -1;

	return plat_arm_calc_core_pos(mpidr);
}
...@@ -27,7 +27,8 @@ ...@@ -27,7 +27,8 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE. * POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <assert.h>
#include <platform.h>
#include <xlat_tables.h> #include <xlat_tables.h>
/* /*
...@@ -47,3 +48,18 @@ void bl32_plat_enable_mmu(uint32_t flags) ...@@ -47,3 +48,18 @@ void bl32_plat_enable_mmu(uint32_t flags)
{ {
enable_mmu_el1(flags); enable_mmu_el1(flags);
} }
#if !ENABLE_PLAT_COMPAT
/*
 * Helper for the deprecated platform_get_core_pos() when platform
 * compatibility is disabled, so that SPDs built against the older platform
 * API keep working. The mpidr passed in must identify a present CPU.
 */
unsigned int platform_core_pos_helper(unsigned long mpidr)
{
	int core_pos;

	core_pos = plat_core_pos_by_mpidr(mpidr);
	/* Callers of the old API never pass an invalid mpidr */
	assert(core_pos >= 0);

	return (unsigned int)core_pos;
}
#endif
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <assert.h>
#include <platform.h>
#include <psci.h>
/*
 * The PSCI generic code uses this API to let the platform participate in state
 * coordination during a power management operation. It compares the platform
 * specific local power states requested by each cpu for a given power domain
 * and returns the coordinated target power state that the domain should
 * enter. A platform assigns a number to a local power state. This default
 * implementation assumes that the platform assigns these numbers in order of
 * increasing depth of the power state i.e. for two power states X & Y, if X < Y
 * then X represents a shallower power state than Y. As a result, the
 * coordinated target local power state for a power domain will be the minimum
 * of the requested local power states.
 */
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t coord_state = PLAT_MAX_OFF_STATE;
	unsigned int i;

	/* At least one CPU must have requested a state */
	assert(ncpu);

	/* The shallowest (numerically smallest) request wins */
	for (i = 0; i < ncpu; i++) {
		if (states[i] < coord_state)
			coord_state = states[i];
	}

	return coord_state;
}
...@@ -32,37 +32,38 @@ ...@@ -32,37 +32,38 @@
#include <asm_macros.S> #include <asm_macros.S>
#include <platform_def.h> #include <platform_def.h>
.weak platform_get_core_pos
.weak platform_check_mpidr
.weak plat_report_exception .weak plat_report_exception
.weak plat_crash_console_init .weak plat_crash_console_init
.weak plat_crash_console_putc .weak plat_crash_console_putc
.weak plat_reset_handler .weak plat_reset_handler
.weak plat_disable_acp .weak plat_disable_acp
/* ----------------------------------------------------- #if !ENABLE_PLAT_COMPAT
* int platform_get_core_pos(int mpidr); .globl platform_get_core_pos
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func platform_get_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
endfunc platform_get_core_pos
/* ----------------------------------------------------- #define MPIDR_RES_BIT_MASK 0xff000000
* Placeholder function which should be redefined by
* each platform. /* ------------------------------------------------------------------
* ----------------------------------------------------- * int platform_get_core_pos(int mpidr)
* Returns the CPU index of the CPU specified by mpidr. This is
* defined when platform compatibility is disabled to enable Trusted
* Firmware components like SPD using the old platform API to work.
* This API is deprecated and it assumes that the mpidr specified is
* that of a valid and present CPU. Instead, plat_my_core_pos()
* should be used for CPU index of the current CPU and
* plat_core_pos_by_mpidr() should be used for CPU index of a
* CPU specified by its mpidr.
* ------------------------------------------------------------------
*/ */
func platform_check_mpidr func_deprecated platform_get_core_pos
mov x0, xzr bic x0, x0, #MPIDR_RES_BIT_MASK
ret mrs x1, mpidr_el1
endfunc platform_check_mpidr bic x1, x1, #MPIDR_RES_BIT_MASK
cmp x0, x1
beq plat_my_core_pos
b platform_core_pos_helper
endfunc_deprecated platform_get_core_pos
#endif
/* ----------------------------------------------------- /* -----------------------------------------------------
* Placeholder function which should be redefined by * Placeholder function which should be redefined by
......
...@@ -30,13 +30,56 @@ ...@@ -30,13 +30,56 @@
#include <arch.h> #include <arch.h>
#include <asm_macros.S> #include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h> #include <platform_def.h>
.local platform_normal_stacks .local platform_normal_stacks
.weak platform_set_stack #if ENABLE_PLAT_COMPAT
.globl plat_get_my_stack
.globl plat_set_my_stack
.weak platform_get_stack .weak platform_get_stack
.weak platform_set_stack
#else
.weak plat_get_my_stack
.weak plat_set_my_stack
.globl platform_get_stack
.globl platform_set_stack
#endif /* __ENABLE_PLAT_COMPAT__ */
#if ENABLE_PLAT_COMPAT
/* ---------------------------------------------------------------------
* When the compatility layer is enabled, the new platform APIs
* viz plat_get_my_stack() and plat_set_my_stack() need to be
* defined using the previous APIs platform_get_stack() and
* platform_set_stack(). Also we need to provide weak definitions
* of platform_get_stack() and platform_set_stack() for the platforms
* to reuse.
* --------------------------------------------------------------------
*/
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
mrs x0, mpidr_el1
b platform_get_stack
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For the current CPU, this function sets the stack
* pointer to a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
mrs x0, mpidr_el1
b platform_set_stack
endfunc plat_set_my_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr) * unsigned long platform_get_stack (unsigned long mpidr)
...@@ -65,6 +108,85 @@ func platform_set_stack ...@@ -65,6 +108,85 @@ func platform_set_stack
ret x9 ret x9
endfunc platform_set_stack endfunc platform_set_stack
#else
/* ---------------------------------------------------------------------
* When the compatility layer is disabled, the new platform APIs
* viz plat_get_my_stack() and plat_set_my_stack() are
* supported by the platform and the previous APIs platform_get_stack()
* and platform_set_stack() are defined in terms of new APIs making use
* of the fact that they are only ever invoked for the current CPU.
* This is to enable components of Trusted Firmware like SPDs using the
* old platform APIs to continue to work.
* --------------------------------------------------------------------
*/
/* -------------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr)
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory. The
* 'mpidr' should correspond to that of the current CPU.
* This function is deprecated and plat_get_my_stack()
* should be used instead.
* -------------------------------------------------------
*/
func_deprecated platform_get_stack
#if ASM_ASSERTION
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
#endif
b plat_get_my_stack
endfunc_deprecated platform_get_stack
/* -----------------------------------------------------
* void platform_set_stack (unsigned long mpidr)
*
* For the current CPU, this function sets the stack pointer
* to a stack allocated in normal memory. The
* 'mpidr' should correspond to that of the current CPU.
* This function is deprecated and plat_get_my_stack()
* should be used instead.
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
#if ASM_ASSERTION
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
#endif
b plat_set_my_stack
endfunc_deprecated platform_set_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
mov x10, x30 // lr
get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret x10
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For the current CPU, this function sets the stack
* pointer to a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
mov x9, x30 // lr
bl plat_get_my_stack
mov sp, x0
ret x9
endfunc plat_set_my_stack
#endif /*__ENABLE_PLAT_COMPAT__*/
/* ----------------------------------------------------- /* -----------------------------------------------------
* Per-cpu stacks in normal memory. Each cpu gets a * Per-cpu stacks in normal memory. Each cpu gets a
* stack of PLATFORM_STACK_SIZE bytes. * stack of PLATFORM_STACK_SIZE bytes.
......
...@@ -34,35 +34,63 @@ ...@@ -34,35 +34,63 @@
.local platform_normal_stacks .local platform_normal_stacks
.globl plat_set_my_stack
.globl plat_get_my_stack
.globl platform_set_stack .globl platform_set_stack
.globl platform_get_stack .globl platform_get_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long) * unsigned long plat_get_my_stack ()
* *
* For cold-boot BL images, only the primary CPU needs a * For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a * stack. This function returns the stack pointer for a
* stack allocated in device memory. * stack allocated in device memory.
* ----------------------------------------------------- * -----------------------------------------------------
*/ */
func platform_get_stack func plat_get_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret ret
endfunc platform_get_stack endfunc plat_get_my_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* void platform_set_stack (unsigned long) * void plat_set_my_stack ()
* *
* For cold-boot BL images, only the primary CPU needs a * For cold-boot BL images, only the primary CPU needs a
* stack. This function sets the stack pointer to a stack * stack. This function sets the stack pointer to a stack
* allocated in normal memory. * allocated in normal memory.
* ----------------------------------------------------- * -----------------------------------------------------
*/ */
func platform_set_stack func plat_set_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
mov sp, x0 mov sp, x0
ret ret
endfunc platform_set_stack endfunc plat_set_my_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a
* stack allocated in device memory. This function
* is deprecated.
* -----------------------------------------------------
*/
func_deprecated platform_get_stack
b plat_get_my_stack
endfunc_deprecated platform_get_stack
/* -----------------------------------------------------
* void platform_set_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function sets the stack pointer to a stack
* allocated in normal memory.This function is
* deprecated.
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
b plat_set_my_stack
endfunc_deprecated platform_set_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* Single cpu stack in normal memory. * Single cpu stack in normal memory.
......
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
.globl plat_my_core_pos
.globl plat_is_my_cpu_primary
.globl plat_get_my_entrypoint
.weak platform_get_core_pos
/* -----------------------------------------------------
* Compatibility wrappers for new platform APIs.
* -----------------------------------------------------
*/
	/* -----------------------------------------------------
	 * Returns the index of the calling CPU. Implemented by
	 * reading this CPU's own MPIDR and tail-calling the
	 * legacy platform_get_core_pos() helper, which returns
	 * directly to our caller.
	 * -----------------------------------------------------
	 */
func plat_my_core_pos
	mrs	x0, mpidr_el1
	b	platform_get_core_pos
endfunc plat_my_core_pos
	/* -----------------------------------------------------
	 * Returns non-zero if the calling CPU is the primary.
	 * Delegates to the legacy platform_is_primary_cpu()
	 * with this CPU's own MPIDR (tail-call).
	 * -----------------------------------------------------
	 */
func plat_is_my_cpu_primary
	mrs	x0, mpidr_el1
	b	platform_is_primary_cpu
endfunc plat_is_my_cpu_primary
	/* -----------------------------------------------------
	 * Returns the warm-boot entrypoint for the calling CPU.
	 * Delegates to the legacy platform_get_entrypoint()
	 * with this CPU's own MPIDR (tail-call).
	 * -----------------------------------------------------
	 */
func plat_get_my_entrypoint
	mrs	x0, mpidr_el1
	b	platform_get_entrypoint
endfunc plat_get_my_entrypoint
	/* -----------------------------------------------------
	 * int platform_get_core_pos(int mpidr);
	 * With this function: CorePos = (ClusterId * 4) +
	 *                                 CoreId
	 * -----------------------------------------------------
	 */
func platform_get_core_pos
	and	x1, x0, #MPIDR_CPU_MASK		/* x1 = CoreId (Aff0 field) */
	and	x0, x0, #MPIDR_CLUSTER_MASK	/* x0 = ClusterId << 8 (Aff1 field) */
	/* Aff1 is at bit 8; shifting right by 6 yields ClusterId * 4 */
	add	x0, x1, x0, LSR #6
	ret
endfunc platform_get_core_pos
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The PSCI compatibility layer only understands the original power_state
# parameter format, so it cannot be built with the extended State-ID format.
ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
$(error "PSCI Compatibility mode can be enabled only if \
	PSCI_EXTENDED_STATE_ID is not set")
endif

# Assembly helpers implementing the new platform API in terms of the old one
PLAT_BL_COMMON_SOURCES	+=	plat/compat/aarch64/plat_helpers_compat.S

# C shims mapping the old power-management and topology interfaces to PSCI v1.0
BL31_SOURCES		+=	plat/common/aarch64/plat_psci_common.c	\
				plat/compat/plat_pm_compat.c		\
				plat/compat/plat_topology_compat.c
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <platform.h>
#include <psci.h>
/*
* The platform hooks exported by the platform using the earlier version of
* platform interface
*/
const plat_pm_ops_t *pm_ops;
/*
* The hooks exported by the compatibility layer
*/
static plat_psci_ops_t compat_psci_ops;
/*
* The secure entry point to be used on warm reset.
*/
static unsigned long secure_entrypoint;
/*
* This array stores the 'power_state' requests of each CPU during
* CPU_SUSPEND and SYSTEM_SUSPEND to support querying of state-ID
* by the platform.
*/
unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
/*******************************************************************************
* The PSCI compatibility helper to parse the power state and populate the
* 'pwr_domain_state' for each power level. It is assumed that, when in
* compatibility mode, the PSCI generic layer need to know only whether the
* affinity level will be OFF or in RETENTION and if the platform supports
* multiple power down and retention states, it will be taken care within
* the platform layer.
******************************************************************************/
/*******************************************************************************
 * PSCI compatibility helper that translates a PSCI 'power_state' parameter
 * into the per-level 'pwr_domain_state' array used by the new framework.
 * In compatibility mode the generic PSCI layer only needs to know whether
 * each affinity level will be OFF or in RETENTION; if the platform supports
 * multiple power-down/retention states they are handled inside the platform
 * layer itself.
 ******************************************************************************/
static int parse_power_state(unsigned int power_state,
			psci_power_state_t *req_state)
{
	int lvl;
	int state_type = psci_get_pstate_type(power_state);
	int target_lvl = psci_get_pstate_pwrlvl(power_state);

	/* Reject requests that target a level the platform does not have */
	if (target_lvl > PLATFORM_MAX_AFFLVL)
		return PSCI_E_INVALID_PARAMS;

	if (state_type == PSTATE_TYPE_STANDBY) {
		/*
		 * Mark only the CPU-local level as retention and leave the
		 * higher levels untouched. The generic PSCI layer then
		 * invokes the plat_psci_ops 'cpu_standby' hook and the
		 * compatibility layer calls 'affinst_standby' with the
		 * correct power_state parameter, preserving the original
		 * behavior.
		 */
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
		return PSCI_E_SUCCESS;
	}

	/* Power-down request: mark every level up to the target as OFF */
	for (lvl = target_lvl; lvl >= 0; lvl--)
		req_state->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;

	return PSCI_E_SUCCESS;
}
/*******************************************************************************
* The PSCI compatibility helper to set the 'power_state' in
* psci_power_state_compat[] at index corresponding to the current core.
******************************************************************************/
/*******************************************************************************
 * PSCI compatibility helper that records 'power_state' in
 * psci_power_state_compat[] at the slot belonging to the calling core.
 ******************************************************************************/
static void set_psci_power_state_compat(unsigned int power_state)
{
	unsigned int idx;

	idx = plat_my_core_pos();
	psci_power_state_compat[idx] = power_state;

	/* Make the stored value visible to observers running with
	 * their data caches disabled. */
	flush_dcache_range((uintptr_t) &psci_power_state_compat[idx],
			sizeof(psci_power_state_compat[idx]));
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'validate_power_state'
* hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility implementation of the plat_psci_ops_t
 * 'validate_power_state' hook. Gives the legacy platform hook first refusal,
 * records the raw 'power_state' for later platform queries, then parses it
 * into 'req_state'.
 ******************************************************************************/
static int validate_power_state_compat(unsigned int power_state,
				psci_power_state_t *req_state)
{
	assert(req_state);

	/* The legacy validation hook is optional; honor it when present */
	if (pm_ops->validate_power_state) {
		int rc = pm_ops->validate_power_state(power_state);

		if (rc != PSCI_E_SUCCESS)
			return rc;
	}

	/* Remember the 'power_state' parameter for the current CPU */
	set_psci_power_state_compat(power_state);

	return parse_power_state(power_state, req_state);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t
* 'get_sys_suspend_power_state' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility implementation of the plat_psci_ops_t
 * 'get_sys_suspend_power_state' hook. Queries the legacy hook for the
 * SYSTEM_SUSPEND 'power_state', records it for the current CPU and parses
 * it into 'req_state'.
 ******************************************************************************/
void get_sys_suspend_power_state_compat(psci_power_state_t *req_state)
{
	unsigned int pstate;

	assert(req_state);

	pstate = pm_ops->get_sys_suspend_power_state();

	/* Remember the 'power_state' parameter for the current CPU */
	set_psci_power_state_compat(pstate);

	/* A platform-provided SYSTEM_SUSPEND state must always parse */
	if (parse_power_state(pstate, req_state) != PSCI_E_SUCCESS)
		assert(0);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'validate_ns_entrypoint'
* hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility implementation of the plat_psci_ops_t
 * 'validate_ns_entrypoint' hook: a straight pass-through to the legacy hook.
 ******************************************************************************/
static int validate_ns_entrypoint_compat(uintptr_t ns_entrypoint)
{
	/* Delegate the check entirely to the legacy platform hook */
	return pm_ops->validate_ns_entrypoint(ns_entrypoint);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_standby' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility wrapper that maps the plat_psci_ops_t 'cpu_standby'
 * hook to the legacy 'affinst_standby' hook. The 'cpu_state' argument is not
 * used: the legacy hook expects the raw 'power_state' recorded earlier.
 ******************************************************************************/
static void cpu_standby_compat(plat_local_state_t cpu_state)
{
	unsigned int pwr_state = psci_get_suspend_powerstate();

	/* A valid 'power_state' must have been stashed before standby */
	assert(pwr_state != PSCI_INVALID_DATA);

	pm_ops->affinst_standby(pwr_state);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_on' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility helper for plat_pm_ops_t 'affinst_on' hook.
 * Invokes the legacy hook once per affinity level, from the highest level
 * down to the CPU level, stopping at the first failure.
 ******************************************************************************/
static int pwr_domain_on_compat(u_register_t mpidr)
{
	int level;
	/*
	 * Initialized defensively: 'rc' was previously assigned only inside
	 * the loop, which trips -Wmaybe-uninitialized and would return an
	 * indeterminate value if the loop ever failed to execute.
	 */
	int rc = PSCI_E_SUCCESS;

	/*
	 * The new PSCI framework does not hold the locks for higher level
	 * power domain nodes when this hook is invoked. Hence figuring out the
	 * target state of the parent power domains does not make much sense.
	 * Hence we hard-code the state as PSCI_STATE_OFF for all the levels.
	 * We expect the platform to perform the necessary CPU_ON operations
	 * when the 'affinst_on' is invoked only for level 0.
	 */
	for (level = PLATFORM_MAX_AFFLVL; level >= 0; level--) {
		rc = pm_ops->affinst_on((unsigned long)mpidr, secure_entrypoint,
				level, PSCI_STATE_OFF);
		if (rc != PSCI_E_SUCCESS)
			break;
	}

	return rc;
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_off' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility wrapper for the plat_pm_ops_t 'affinst_off' hook.
 * Walks from the CPU level up to the highest platform affinity level,
 * translating each local state into the legacy ON/OFF encoding.
 ******************************************************************************/
static void pwr_domain_off_compat(const psci_power_state_t *target_state)
{
	int lvl;

	for (lvl = 0; lvl <= PLATFORM_MAX_AFFLVL; lvl++) {
		unsigned int legacy_state;

		/* RUN at this level maps to ON; anything else to OFF */
		legacy_state = is_local_state_run(
				target_state->pwr_domain_state[lvl]) ?
				PSCI_STATE_ON : PSCI_STATE_OFF;
		pm_ops->affinst_off(lvl, legacy_state);
	}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_suspend' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility helper for plat_pm_ops_t 'affinst_suspend' hook.
 * Invokes the legacy hook for every level up to the suspend target level,
 * translating each local state into the legacy ON/OFF encoding.
 ******************************************************************************/
static void pwr_domain_suspend_compat(const psci_power_state_t *target_state)
{
	int level;
	int suspend_lvl;
	unsigned int plat_state;

	/*
	 * The target affinity level of the suspend request is invariant
	 * for the duration of this call; query it once rather than on
	 * every loop iteration as before.
	 */
	suspend_lvl = psci_get_suspend_afflvl();

	for (level = 0; level <= suspend_lvl; level++) {
		plat_state = (is_local_state_run(
				target_state->pwr_domain_state[level]) ?
				PSCI_STATE_ON : PSCI_STATE_OFF);
		pm_ops->affinst_suspend(secure_entrypoint, level, plat_state);
	}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_on_finish'
* hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility wrapper for the plat_pm_ops_t 'affinst_on_finish'
 * hook. Walks from the highest platform affinity level down to the CPU
 * level, translating each local state into the legacy ON/OFF encoding.
 ******************************************************************************/
static void pwr_domain_on_finish_compat(const psci_power_state_t *target_state)
{
	int lvl;

	for (lvl = PLATFORM_MAX_AFFLVL; lvl >= 0; lvl--) {
		unsigned int legacy_state;

		/* RUN at this level maps to ON; anything else to OFF */
		legacy_state = is_local_state_run(
				target_state->pwr_domain_state[lvl]) ?
				PSCI_STATE_ON : PSCI_STATE_OFF;
		pm_ops->affinst_on_finish(lvl, legacy_state);
	}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t
* 'affinst_suspend_finish' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility wrapper for the plat_pm_ops_t
 * 'affinst_suspend_finish' hook. Walks from the suspended affinity level
 * down to the CPU level, translating each local state into the legacy
 * ON/OFF encoding.
 ******************************************************************************/
static void pwr_domain_suspend_finish_compat(
					const psci_power_state_t *target_state)
{
	int lvl = psci_get_suspend_afflvl();

	while (lvl >= 0) {
		unsigned int legacy_state;

		/* RUN at this level maps to ON; anything else to OFF */
		legacy_state = is_local_state_run(
				target_state->pwr_domain_state[lvl]) ?
				PSCI_STATE_ON : PSCI_STATE_OFF;
		pm_ops->affinst_suspend_finish(lvl, legacy_state);
		lvl--;
	}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'system_off' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility helper for plat_pm_ops_t 'system_off' hook.
 * Marked __dead2: the legacy hook is expected never to return.
 ******************************************************************************/
static void __dead2 system_off_compat(void)
{
	pm_ops->system_off();
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'system_reset' hook.
******************************************************************************/
/*******************************************************************************
 * The PSCI compatibility helper for plat_pm_ops_t 'system_reset' hook.
 * Marked __dead2: the legacy hook is expected never to return.
 ******************************************************************************/
static void __dead2 system_reset_compat(void)
{
	pm_ops->system_reset();
}
/*******************************************************************************
* Export the compatibility compat_psci_ops. The assumption made is that the
* power domains correspond to affinity instances on the platform.
******************************************************************************/
/*******************************************************************************
 * Export the compatibility compat_psci_ops. The assumption made is that the
 * power domains correspond to affinity instances on the platform. Each
 * optional wrapper is installed only when the corresponding legacy hook is
 * provided by the platform; since 'compat_psci_ops' has static storage (and
 * is therefore zero-initialized), assigning NULL for missing hooks is
 * equivalent to leaving the field untouched.
 ******************************************************************************/
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
				const plat_psci_ops_t **psci_ops)
{
	/* Fetch the legacy pm hooks exported by the platform port */
	platform_setup_pm(&pm_ops);
	secure_entrypoint = (unsigned long) sec_entrypoint;

	/*
	 * The new porting interface makes the power-state validation hook
	 * compulsory, so its wrapper is installed unconditionally.
	 */
	compat_psci_ops.validate_power_state = validate_power_state_compat;

	/* Wire up each optional wrapper iff the legacy hook exists */
	compat_psci_ops.validate_ns_entrypoint =
		pm_ops->validate_ns_entrypoint ?
			validate_ns_entrypoint_compat : NULL;
	compat_psci_ops.cpu_standby =
		pm_ops->affinst_standby ? cpu_standby_compat : NULL;
	compat_psci_ops.pwr_domain_on =
		pm_ops->affinst_on ? pwr_domain_on_compat : NULL;
	compat_psci_ops.pwr_domain_off =
		pm_ops->affinst_off ? pwr_domain_off_compat : NULL;
	compat_psci_ops.pwr_domain_suspend =
		pm_ops->affinst_suspend ? pwr_domain_suspend_compat : NULL;
	compat_psci_ops.pwr_domain_on_finish =
		pm_ops->affinst_on_finish ? pwr_domain_on_finish_compat : NULL;
	compat_psci_ops.pwr_domain_suspend_finish =
		pm_ops->affinst_suspend_finish ?
			pwr_domain_suspend_finish_compat : NULL;
	compat_psci_ops.system_off =
		pm_ops->system_off ? system_off_compat : NULL;
	compat_psci_ops.system_reset =
		pm_ops->system_reset ? system_reset_compat : NULL;
	compat_psci_ops.get_sys_suspend_power_state =
		pm_ops->get_sys_suspend_power_state ?
			get_sys_suspend_power_state_compat : NULL;

	*psci_ops = &compat_psci_ops;
	return 0;
}
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <platform_def.h>
#include <psci.h>
/* The power domain tree descriptor */
static unsigned char power_domain_tree_desc
[PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1];
/*******************************************************************************
* Simple routine to set the id of an affinity instance at a given level
* in the mpidr. The assumption is that the affinity level and the power
* domain level are the same.
******************************************************************************/
/*******************************************************************************
 * Replace the affinity instance id at level 'aff_lvl' within 'mpidr' with
 * 'aff_inst' and return the result. The assumption is that the affinity
 * level and the power domain level are the same.
 ******************************************************************************/
unsigned long mpidr_set_aff_inst(unsigned long mpidr,
				 unsigned char aff_inst,
				 int aff_lvl)
{
	unsigned long shift, field_mask;

	assert(aff_lvl <= MPIDR_AFFLVL3);

	/* Bit offset of the affinity field for the requested level */
	shift = get_afflvl_shift(aff_lvl);
	field_mask = (unsigned long)MPIDR_AFFLVL_MASK << shift;

	/* Clear the existing instance id and merge in the new one */
	return (mpidr & ~field_mask) | ((unsigned long)aff_inst << shift);
}
/******************************************************************************
* This function uses insertion sort to sort a given list of mpidr's in the
* ascending order of the index returned by platform_get_core_pos.
*****************************************************************************/
/******************************************************************************
 * Insertion-sort the 'aff_count' entries of 'mpidr_list' into the ascending
 * order of the CPU index returned by platform_get_core_pos().
 *****************************************************************************/
void sort_mpidr_by_cpu_idx(unsigned int aff_count, unsigned long mpidr_list[])
{
	/*
	 * 'i' and 'j' are unsigned to match 'aff_count', avoiding the
	 * signed/unsigned comparison in the loop condition that the
	 * original 'int' indices produced.
	 */
	unsigned int i, j;
	unsigned long temp_mpidr;

	for (i = 1; i < aff_count; i++) {
		temp_mpidr = mpidr_list[i];

		/* Shift larger-indexed entries right to open a slot */
		for (j = i;
			j > 0 &&
			platform_get_core_pos(mpidr_list[j-1]) >
			platform_get_core_pos(temp_mpidr);
			j--)
			mpidr_list[j] = mpidr_list[j-1];

		mpidr_list[j] = temp_mpidr;
	}
}
/*******************************************************************************
* The compatibility routine to construct the power domain tree description.
* The assumption made is that the power domains correspond to affinity
* instances on the platform. This routine's aim is to traverse to the target
* affinity level and populate the number of siblings at that level in
* 'power_domain_tree_desc' array. It uses the current affinity level to keep
* track of how many levels from the root of the tree have been traversed.
* If the current affinity level != target affinity level, then the platform
* is asked to return the number of children that each affinity instance has
* at the current affinity level. Traversal is then done for each child at the
* next lower level i.e. current affinity level - 1.
*
* The power domain description needs to be constructed in such a way that
* affinity instances containing CPUs with lower cpu indices need to be
* described first. Hence when traversing the power domain levels, the list
* of mpidrs at that power domain level is sorted in the ascending order of CPU
* indices before the lower levels are recursively described.
*
* CAUTION: This routine assumes that affinity instance ids are allocated in a
* monotonically increasing manner at each affinity level in a mpidr starting
* from 0. If the platform breaks this assumption then this code will have to
* be reworked accordingly.
******************************************************************************/
/*
 * Recursive worker for plat_get_power_domain_tree_desc(): traverses the
 * affinity hierarchy from 'cur_afflvl' down to 'tgt_afflvl' starting at
 * 'mpidr', and appends the sibling count of every affinity instance found
 * at the target level into power_domain_tree_desc[] starting at
 * 'affmap_idx'. Returns the next free index in the array.
 */
static unsigned int init_pwr_domain_tree_desc(unsigned long mpidr,
					unsigned int affmap_idx,
					int cur_afflvl,
					int tgt_afflvl)
{
	unsigned int ctr, aff_count;

	/*
	 * Temporary list to hold the MPIDR list at a particular power domain
	 * level so as to sort them.
	 */
	unsigned long mpidr_list[PLATFORM_CORE_COUNT];

	/* Traversal only ever descends towards the target level */
	assert(cur_afflvl >= tgt_afflvl);

	/*
	 * Find the number of siblings at the current power level &
	 * assert if there are none 'cause then we have been invoked with
	 * an invalid mpidr.
	 */
	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
	assert(aff_count);

	if (tgt_afflvl < cur_afflvl) {
		/* Enumerate each child instance at the current level */
		for (ctr = 0; ctr < aff_count; ctr++) {
			mpidr_list[ctr] = mpidr_set_aff_inst(mpidr, ctr,
							cur_afflvl);
		}

		/* Need to sort mpidr list according to CPU index */
		sort_mpidr_by_cpu_idx(aff_count, mpidr_list);

		/* Recurse into each child, one level closer to the target */
		for (ctr = 0; ctr < aff_count; ctr++) {
			affmap_idx = init_pwr_domain_tree_desc(mpidr_list[ctr],
						affmap_idx,
						cur_afflvl - 1,
						tgt_afflvl);
		}
	} else {
		/* At the target level: record this instance's sibling count */
		power_domain_tree_desc[affmap_idx++] = aff_count;
	}
	return affmap_idx;
}
/*******************************************************************************
* This function constructs the topology tree description at runtime
* and returns it. The assumption made is that the power domains correspond
* to affinity instances on the platform.
******************************************************************************/
/*******************************************************************************
 * Construct the topology tree description at runtime and return it. The
 * assumption made is that the power domains correspond to affinity instances
 * on the platform.
 ******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
	int lvl;
	int idx = 0;

	/*
	 * Affinity instance ids are assumed to be allocated from 0 onwards
	 * at every level, so FIRST_MPIDR (0.0.0.0) seeds each traversal.
	 * One full-depth pass per target level fills in the sibling counts
	 * for that level of the descriptor.
	 */
	for (lvl = PLATFORM_MAX_AFFLVL; lvl >= MPIDR_AFFLVL0; lvl--) {
		idx = init_pwr_domain_tree_desc(FIRST_MPIDR,
						idx,
						PLATFORM_MAX_AFFLVL,
						lvl);
	}

	/* The descriptor must now be completely populated */
	assert(idx == (PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1));

	return power_domain_tree_desc;
}
/******************************************************************************
* The compatibility helper function for plat_core_pos_by_mpidr(). It
* validates the 'mpidr' by making sure that it is within acceptable bounds
* for the platform and queries the platform layer whether the CPU specified
* by the mpidr is present or not. If present, it returns the index of the
* core corresponding to the 'mpidr'. Else it returns -1.
*****************************************************************************/
/******************************************************************************
 * Compatibility implementation of plat_core_pos_by_mpidr(): validates the
 * 'mpidr' against the platform's affinity bounds and presence information,
 * returning the CPU index on success or -1 for an invalid/absent CPU.
 *****************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	unsigned long shift, inst_id;
	int lvl;

	/* Drop the reserved bits and the U bit from the mpidr */
	mpidr &= MPIDR_AFFINITY_MASK;

	/* Any affinity field above PLATFORM_MAX_AFFLVL must be zero */
	if (mpidr >> get_afflvl_shift(PLATFORM_MAX_AFFLVL + 1))
		return -1;

	/* Each instance id must lie within the platform's count at its level */
	for (lvl = PLATFORM_MAX_AFFLVL; lvl >= 0; lvl--) {
		shift = get_afflvl_shift(lvl);
		inst_id = (mpidr &
			((unsigned long)MPIDR_AFFLVL_MASK << shift)) >> shift;
		if (inst_id >= plat_get_aff_count(lvl, mpidr))
			return -1;
	}

	/* Finally, the CPU must actually be present on the platform */
	if (plat_get_aff_state(0, mpidr) == PSCI_AFF_ABSENT)
		return -1;

	return platform_get_core_pos(mpidr);
}
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -80,7 +80,6 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id, ...@@ -80,7 +80,6 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
void *cookie) void *cookie)
{ {
uint32_t linear_id; uint32_t linear_id;
uint64_t mpidr;
optee_context_t *optee_ctx; optee_context_t *optee_ctx;
/* Check the security state when the exception was generated */ /* Check the security state when the exception was generated */
...@@ -92,14 +91,13 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id, ...@@ -92,14 +91,13 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
#endif #endif
/* Sanity check the pointer to this cpu's context */ /* Sanity check the pointer to this cpu's context */
mpidr = read_mpidr();
assert(handle == cm_get_context(NON_SECURE)); assert(handle == cm_get_context(NON_SECURE));
/* Save the non-secure context before entering the OPTEE */ /* Save the non-secure context before entering the OPTEE */
cm_el1_sysregs_context_save(NON_SECURE); cm_el1_sysregs_context_save(NON_SECURE);
/* Get a reference to this cpu's OPTEE context */ /* Get a reference to this cpu's OPTEE context */
linear_id = platform_get_core_pos(mpidr); linear_id = plat_my_core_pos();
optee_ctx = &opteed_sp_context[linear_id]; optee_ctx = &opteed_sp_context[linear_id];
assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE)); assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
...@@ -125,10 +123,9 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id, ...@@ -125,10 +123,9 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
int32_t opteed_setup(void) int32_t opteed_setup(void)
{ {
entry_point_info_t *optee_ep_info; entry_point_info_t *optee_ep_info;
uint64_t mpidr = read_mpidr();
uint32_t linear_id; uint32_t linear_id;
linear_id = platform_get_core_pos(mpidr); linear_id = plat_my_core_pos();
/* /*
* Get information about the Secure Payload (BL32) image. Its * Get information about the Secure Payload (BL32) image. Its
...@@ -182,8 +179,7 @@ int32_t opteed_setup(void) ...@@ -182,8 +179,7 @@ int32_t opteed_setup(void)
******************************************************************************/ ******************************************************************************/
static int32_t opteed_init(void) static int32_t opteed_init(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
entry_point_info_t *optee_entry_point; entry_point_info_t *optee_entry_point;
uint64_t rc; uint64_t rc;
...@@ -195,7 +191,7 @@ static int32_t opteed_init(void) ...@@ -195,7 +191,7 @@ static int32_t opteed_init(void)
optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE); optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(optee_entry_point); assert(optee_entry_point);
cm_init_context(mpidr, optee_entry_point); cm_init_my_context(optee_entry_point);
/* /*
* Arrange for an entry into OPTEE. It will be returned via * Arrange for an entry into OPTEE. It will be returned via
...@@ -226,8 +222,7 @@ uint64_t opteed_smc_handler(uint32_t smc_fid, ...@@ -226,8 +222,7 @@ uint64_t opteed_smc_handler(uint32_t smc_fid,
uint64_t flags) uint64_t flags)
{ {
cpu_context_t *ns_cpu_context; cpu_context_t *ns_cpu_context;
unsigned long mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
uint64_t rc; uint64_t rc;
......
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -51,8 +51,7 @@ static void opteed_cpu_on_handler(uint64_t target_cpu) ...@@ -51,8 +51,7 @@ static void opteed_cpu_on_handler(uint64_t target_cpu)
static int32_t opteed_cpu_off_handler(uint64_t unused) static int32_t opteed_cpu_off_handler(uint64_t unused)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors); assert(optee_vectors);
...@@ -85,8 +84,7 @@ static int32_t opteed_cpu_off_handler(uint64_t unused) ...@@ -85,8 +84,7 @@ static int32_t opteed_cpu_off_handler(uint64_t unused)
static void opteed_cpu_suspend_handler(uint64_t unused) static void opteed_cpu_suspend_handler(uint64_t unused)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors); assert(optee_vectors);
...@@ -116,8 +114,7 @@ static void opteed_cpu_suspend_handler(uint64_t unused) ...@@ -116,8 +114,7 @@ static void opteed_cpu_suspend_handler(uint64_t unused)
static void opteed_cpu_on_finish_handler(uint64_t unused) static void opteed_cpu_on_finish_handler(uint64_t unused)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
entry_point_info_t optee_on_entrypoint; entry_point_info_t optee_on_entrypoint;
...@@ -129,7 +126,7 @@ static void opteed_cpu_on_finish_handler(uint64_t unused) ...@@ -129,7 +126,7 @@ static void opteed_cpu_on_finish_handler(uint64_t unused)
optee_ctx); optee_ctx);
/* Initialise this cpu's secure context */ /* Initialise this cpu's secure context */
cm_init_context(mpidr, &optee_on_entrypoint); cm_init_my_context(&optee_on_entrypoint);
/* Enter OPTEE */ /* Enter OPTEE */
rc = opteed_synchronous_sp_entry(optee_ctx); rc = opteed_synchronous_sp_entry(optee_ctx);
...@@ -153,8 +150,7 @@ static void opteed_cpu_on_finish_handler(uint64_t unused) ...@@ -153,8 +150,7 @@ static void opteed_cpu_on_finish_handler(uint64_t unused)
static void opteed_cpu_suspend_finish_handler(uint64_t suspend_level) static void opteed_cpu_suspend_finish_handler(uint64_t suspend_level)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors); assert(optee_vectors);
...@@ -193,8 +189,7 @@ static int32_t opteed_cpu_migrate_info(uint64_t *resident_cpu) ...@@ -193,8 +189,7 @@ static int32_t opteed_cpu_migrate_info(uint64_t *resident_cpu)
******************************************************************************/ ******************************************************************************/
static void opteed_system_off(void) static void opteed_system_off(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors); assert(optee_vectors);
...@@ -214,8 +209,7 @@ static void opteed_system_off(void) ...@@ -214,8 +209,7 @@ static void opteed_system_off(void)
******************************************************************************/ ******************************************************************************/
static void opteed_system_reset(void) static void opteed_system_reset(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors); assert(optee_vectors);
......
...@@ -121,7 +121,6 @@ int32_t tlkd_setup(void) ...@@ -121,7 +121,6 @@ int32_t tlkd_setup(void)
******************************************************************************/ ******************************************************************************/
int32_t tlkd_init(void) int32_t tlkd_init(void)
{ {
uint64_t mpidr = read_mpidr();
entry_point_info_t *tlk_entry_point; entry_point_info_t *tlk_entry_point;
/* /*
...@@ -131,7 +130,7 @@ int32_t tlkd_init(void) ...@@ -131,7 +130,7 @@ int32_t tlkd_init(void)
tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE); tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(tlk_entry_point); assert(tlk_entry_point);
cm_init_context(mpidr, tlk_entry_point); cm_init_my_context(tlk_entry_point);
/* /*
* Arrange for an entry into the test secure payload. * Arrange for an entry into the test secure payload.
......
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -101,7 +101,6 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id, ...@@ -101,7 +101,6 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
void *cookie) void *cookie)
{ {
uint32_t linear_id; uint32_t linear_id;
uint64_t mpidr;
tsp_context_t *tsp_ctx; tsp_context_t *tsp_ctx;
/* Check the security state when the exception was generated */ /* Check the security state when the exception was generated */
...@@ -113,14 +112,13 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id, ...@@ -113,14 +112,13 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
#endif #endif
/* Sanity check the pointer to this cpu's context */ /* Sanity check the pointer to this cpu's context */
mpidr = read_mpidr();
assert(handle == cm_get_context(NON_SECURE)); assert(handle == cm_get_context(NON_SECURE));
/* Save the non-secure context before entering the TSP */ /* Save the non-secure context before entering the TSP */
cm_el1_sysregs_context_save(NON_SECURE); cm_el1_sysregs_context_save(NON_SECURE);
/* Get a reference to this cpu's TSP context */ /* Get a reference to this cpu's TSP context */
linear_id = platform_get_core_pos(mpidr); linear_id = plat_my_core_pos();
tsp_ctx = &tspd_sp_context[linear_id]; tsp_ctx = &tspd_sp_context[linear_id];
assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE)); assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
...@@ -197,10 +195,9 @@ static uint64_t tspd_ns_interrupt_handler(uint32_t id, ...@@ -197,10 +195,9 @@ static uint64_t tspd_ns_interrupt_handler(uint32_t id,
int32_t tspd_setup(void) int32_t tspd_setup(void)
{ {
entry_point_info_t *tsp_ep_info; entry_point_info_t *tsp_ep_info;
uint64_t mpidr = read_mpidr();
uint32_t linear_id; uint32_t linear_id;
linear_id = platform_get_core_pos(mpidr); linear_id = plat_my_core_pos();
/* /*
* Get information about the Secure Payload (BL32) image. Its * Get information about the Secure Payload (BL32) image. Its
...@@ -256,8 +253,7 @@ int32_t tspd_setup(void) ...@@ -256,8 +253,7 @@ int32_t tspd_setup(void)
******************************************************************************/ ******************************************************************************/
int32_t tspd_init(void) int32_t tspd_init(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
entry_point_info_t *tsp_entry_point; entry_point_info_t *tsp_entry_point;
uint64_t rc; uint64_t rc;
...@@ -269,7 +265,7 @@ int32_t tspd_init(void) ...@@ -269,7 +265,7 @@ int32_t tspd_init(void)
tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE); tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(tsp_entry_point); assert(tsp_entry_point);
cm_init_context(mpidr, tsp_entry_point); cm_init_my_context(tsp_entry_point);
/* /*
* Arrange for an entry into the test secure payload. It will be * Arrange for an entry into the test secure payload. It will be
...@@ -300,8 +296,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, ...@@ -300,8 +296,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
uint64_t flags) uint64_t flags)
{ {
cpu_context_t *ns_cpu_context; cpu_context_t *ns_cpu_context;
unsigned long mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos(), ns;
uint32_t linear_id = platform_get_core_pos(mpidr), ns;
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
uint64_t rc; uint64_t rc;
#if TSP_INIT_ASYNC #if TSP_INIT_ASYNC
...@@ -453,7 +448,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, ...@@ -453,7 +448,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* /*
* Disable the interrupt NS locally since it will be enabled globally * Disable the interrupt NS locally since it will be enabled globally
* within cm_init_context. * within cm_init_my_context.
*/ */
disable_intr_rm_local(INTR_TYPE_NS, SECURE); disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif #endif
...@@ -471,7 +466,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid, ...@@ -471,7 +466,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
assert(NON_SECURE == assert(NON_SECURE ==
GET_SECURITY_STATE(next_image_info->h.attr)); GET_SECURITY_STATE(next_image_info->h.attr));
cm_init_context(read_mpidr_el1(), next_image_info); cm_init_my_context(next_image_info);
cm_prepare_el3_exit(NON_SECURE); cm_prepare_el3_exit(NON_SECURE);
SMC_RET0(cm_get_context(NON_SECURE)); SMC_RET0(cm_get_context(NON_SECURE));
#else #else
......
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -52,8 +52,7 @@ static void tspd_cpu_on_handler(uint64_t target_cpu) ...@@ -52,8 +52,7 @@ static void tspd_cpu_on_handler(uint64_t target_cpu)
static int32_t tspd_cpu_off_handler(uint64_t unused) static int32_t tspd_cpu_off_handler(uint64_t unused)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors); assert(tsp_vectors);
...@@ -86,8 +85,7 @@ static int32_t tspd_cpu_off_handler(uint64_t unused) ...@@ -86,8 +85,7 @@ static int32_t tspd_cpu_off_handler(uint64_t unused)
static void tspd_cpu_suspend_handler(uint64_t unused) static void tspd_cpu_suspend_handler(uint64_t unused)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors); assert(tsp_vectors);
...@@ -117,8 +115,7 @@ static void tspd_cpu_suspend_handler(uint64_t unused) ...@@ -117,8 +115,7 @@ static void tspd_cpu_suspend_handler(uint64_t unused)
static void tspd_cpu_on_finish_handler(uint64_t unused) static void tspd_cpu_on_finish_handler(uint64_t unused)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
entry_point_info_t tsp_on_entrypoint; entry_point_info_t tsp_on_entrypoint;
...@@ -131,12 +128,12 @@ static void tspd_cpu_on_finish_handler(uint64_t unused) ...@@ -131,12 +128,12 @@ static void tspd_cpu_on_finish_handler(uint64_t unused)
tsp_ctx); tsp_ctx);
/* Initialise this cpu's secure context */ /* Initialise this cpu's secure context */
cm_init_context(mpidr, &tsp_on_entrypoint); cm_init_my_context(&tsp_on_entrypoint);
#if TSPD_ROUTE_IRQ_TO_EL3 #if TSPD_ROUTE_IRQ_TO_EL3
/* /*
* Disable the NS interrupt locally since it will be enabled globally * Disable the NS interrupt locally since it will be enabled globally
* within cm_init_context. * within cm_init_my_context.
*/ */
disable_intr_rm_local(INTR_TYPE_NS, SECURE); disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif #endif
...@@ -163,8 +160,7 @@ static void tspd_cpu_on_finish_handler(uint64_t unused) ...@@ -163,8 +160,7 @@ static void tspd_cpu_on_finish_handler(uint64_t unused)
static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level) static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level)
{ {
int32_t rc = 0; int32_t rc = 0;
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors); assert(tsp_vectors);
...@@ -203,8 +199,7 @@ static int32_t tspd_cpu_migrate_info(uint64_t *resident_cpu) ...@@ -203,8 +199,7 @@ static int32_t tspd_cpu_migrate_info(uint64_t *resident_cpu)
******************************************************************************/ ******************************************************************************/
static void tspd_system_off(void) static void tspd_system_off(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors); assert(tsp_vectors);
...@@ -224,8 +219,7 @@ static void tspd_system_off(void) ...@@ -224,8 +219,7 @@ static void tspd_system_off(void)
******************************************************************************/ ******************************************************************************/
static void tspd_system_reset(void) static void tspd_system_reset(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors); assert(tsp_vectors);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment