Commit 432b9905 authored by Achin Gupta's avatar Achin Gupta
Browse files

Merge pull request #361 from achingupta/for_sm/psci_proto_v5

For sm/psci proto v5
parents 9caf7e36 9d070b99
...@@ -30,35 +30,49 @@ ...@@ -30,35 +30,49 @@
#include <arch.h> #include <arch.h>
#include <psci.h> #include <psci.h>
#include <plat_arm.h>
#include <platform_def.h> #include <platform_def.h>
/* #define get_arm_cluster_core_count(mpidr)\
* Weak definitions use fixed topology. Strong definitions could make topology (((mpidr) & 0x100) ? PLAT_ARM_CLUSTER1_CORE_COUNT :\
* configurable PLAT_ARM_CLUSTER0_CORE_COUNT)
*/
#pragma weak plat_get_aff_count
#pragma weak plat_get_aff_state
#pragma weak plat_arm_topology_setup
unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr) /* The power domain tree descriptor which need to be exported by ARM platforms */
{ extern const unsigned char arm_power_domain_tree_desc[];
/* Report 1 (absent) instance at levels higher that the cluster level */
if (aff_lvl > MPIDR_AFFLVL1)
return 1;
if (aff_lvl == MPIDR_AFFLVL1)
return ARM_CLUSTER_COUNT;
return mpidr & 0x100 ? PLAT_ARM_CLUSTER1_CORE_COUNT :
PLAT_ARM_CLUSTER0_CORE_COUNT;
}
unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr) /*******************************************************************************
* This function returns the ARM default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{ {
return aff_lvl <= MPIDR_AFFLVL1 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT; return arm_power_domain_tree_desc;
} }
void plat_arm_topology_setup(void) /*******************************************************************************
* This function validates an MPIDR by checking whether it falls within the
* acceptable bounds. An error code (-1) is returned if an incorrect mpidr
* is passed.
******************************************************************************/
int arm_check_mpidr(u_register_t mpidr)
{ {
unsigned int cluster_id, cpu_id;
mpidr &= MPIDR_AFFINITY_MASK;
if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
return -1;
cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
if (cluster_id >= ARM_CLUSTER_COUNT)
return -1;
/* Validate cpu_id by checking whether it represents a CPU in
one of the two clusters present on the platform. */
if (cpu_id >= get_arm_cluster_core_count(mpidr))
return -1;
return 0;
} }
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
# TSP source files common to ARM standard platforms # TSP source files common to ARM standard platforms
BL32_SOURCES += drivers/arm/gic/arm_gic.c \ BL32_SOURCES += drivers/arm/gic/arm_gic.c \
drivers/arm/gic/gic_v2.c \ drivers/arm/gic/gic_v2.c \
plat/arm/common/arm_topology.c \
plat/arm/common/tsp/arm_tsp_setup.c \ plat/arm/common/tsp/arm_tsp_setup.c \
plat/common/aarch64/platform_mp_stack.S \ plat/common/aarch64/platform_mp_stack.S \
plat/common/plat_gic.c plat/common/plat_gic.c
...@@ -33,11 +33,9 @@ ...@@ -33,11 +33,9 @@
#include <css_def.h> #include <css_def.h>
.weak plat_secondary_cold_boot_setup .weak plat_secondary_cold_boot_setup
.weak platform_get_entrypoint .weak plat_get_my_entrypoint
.weak platform_mem_init .globl plat_arm_calc_core_pos
.globl platform_get_core_pos .weak plat_is_my_cpu_primary
.weak platform_is_primary_cpu
/* ----------------------------------------------------- /* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void); * void plat_secondary_cold_boot_setup (void);
...@@ -54,69 +52,56 @@ cb_panic: ...@@ -54,69 +52,56 @@ cb_panic:
b cb_panic b cb_panic
endfunc plat_secondary_cold_boot_setup endfunc plat_secondary_cold_boot_setup
/* ----------------------------------------------------- /* ---------------------------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid); * unsigned long plat_get_my_entrypoint (void);
* *
* Main job of this routine is to distinguish between * Main job of this routine is to distinguish between a cold and a warm
* a cold and warm boot. * boot. On CSS platforms, this distinction is based on the contents of
* On a cold boot the secondaries first wait for the * the Trusted Mailbox. It is initialised to zero by the SCP before the
* platform to be initialized after which they are * AP cores are released from reset. Therefore, a zero mailbox means
* hotplugged in. The primary proceeds to perform the * it's a cold reset.
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
* *
* TODO: Not a good idea to save lr in a temp reg * This functions returns the contents of the mailbox, i.e.:
* ----------------------------------------------------- * - 0 for a cold boot;
* - the warm boot entrypoint for a warm boot.
* ---------------------------------------------------------------------
*/ */
func platform_get_entrypoint func plat_get_my_entrypoint
mov x9, x30 // lr mov_imm x0, TRUSTED_MAILBOX_BASE
bl platform_get_core_pos ldr x0, [x0]
ldr x1, =TRUSTED_MAILBOXES_BASE ret
lsl x0, x0, #TRUSTED_MAILBOX_SHIFT endfunc plat_get_my_entrypoint
ldr x0, [x1, x0]
ret x9
endfunc platform_get_entrypoint
/* /* -----------------------------------------------------------
* Override the default implementation to swap the cluster order. * unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* This is necessary in order to match the format of the boot * Function to calculate the core position by
* information passed by the SCP and read in platform_is_primary_cpu * swapping the cluster order. This is necessary in order to
* below. * match the format of the boot information passed by the SCP
* and read in plat_is_my_cpu_primary below.
* -----------------------------------------------------------
*/ */
func platform_get_core_pos func plat_arm_calc_core_pos
and x1, x0, #MPIDR_CPU_MASK and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK and x0, x0, #MPIDR_CLUSTER_MASK
eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order
add x0, x1, x0, LSR #6 add x0, x1, x0, LSR #6
ret ret
endfunc platform_get_core_pos endfunc plat_arm_calc_core_pos
/* -----------------------------------------------------
* void platform_mem_init(void);
*
* We don't need to carry out any memory initialization
* on CSS platforms. The Secure RAM is accessible straight away.
* -----------------------------------------------------
*/
func platform_mem_init
ret
endfunc platform_mem_init
/* ----------------------------------------------------- /* -----------------------------------------------------
* unsigned int platform_is_primary_cpu (unsigned int mpid); * unsigned int plat_is_my_cpu_primary (void);
* *
* Given the mpidr say whether this cpu is the primary * Find out whether the current cpu is the primary
* cpu (applicable ony after a cold boot) * cpu (applicable ony after a cold boot)
* ----------------------------------------------------- * -----------------------------------------------------
*/ */
func platform_is_primary_cpu func plat_is_my_cpu_primary
mov x9, x30 mov x9, x30
bl platform_get_core_pos bl plat_my_core_pos
ldr x1, =SCP_BOOT_CFG_ADDR ldr x1, =SCP_BOOT_CFG_ADDR
ldr x1, [x1] ldr x1, [x1]
ubfx x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_BIT_WIDTH ubfx x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_BIT_WIDTH
cmp x0, x1 cmp x0, x1
cset x0, eq cset w0, eq
ret x9 ret x9
endfunc platform_is_primary_cpu endfunc plat_is_my_cpu_primary
...@@ -44,7 +44,8 @@ BL2_SOURCES += plat/arm/css/common/css_bl2_setup.c \ ...@@ -44,7 +44,8 @@ BL2_SOURCES += plat/arm/css/common/css_bl2_setup.c \
BL31_SOURCES += plat/arm/css/common/css_mhu.c \ BL31_SOURCES += plat/arm/css/common/css_mhu.c \
plat/arm/css/common/css_pm.c \ plat/arm/css/common/css_pm.c \
plat/arm/css/common/css_scpi.c plat/arm/css/common/css_scpi.c \
plat/arm/css/common/css_topology.c
ifneq (${RESET_TO_BL31},0) ifneq (${RESET_TO_BL31},0)
......
...@@ -41,42 +41,49 @@ ...@@ -41,42 +41,49 @@
#include <psci.h> #include <psci.h>
#include "css_scpi.h" #include "css_scpi.h"
#if ARM_RECOM_STATE_ID_ENC
/*
* The table storing the valid idle power states. Ensure that the
* array entries are populated in ascending order of state-id to
* enable us to use binary search during power state validation.
* The table must be terminated by a NULL entry.
*/
const unsigned int arm_pm_idle_states[] = {
/* State-id - 0x01 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RET,
ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
/* State-id - 0x02 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
/* State-id - 0x22 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
0,
};
#endif
/******************************************************************************* /*******************************************************************************
* Private function to program the mailbox for a cpu before it is released * Private function to program the mailbox for a cpu before it is released
* from reset. * from reset.
******************************************************************************/ ******************************************************************************/
static void css_program_mailbox(uint64_t mpidr, uint64_t address) static void css_program_mailbox(uintptr_t address)
{ {
uint64_t linear_id; uintptr_t *mailbox = (void *) TRUSTED_MAILBOX_BASE;
uint64_t mbox; *mailbox = address;
flush_dcache_range((uintptr_t) mailbox, sizeof(*mailbox));
linear_id = platform_get_core_pos(mpidr);
mbox = TRUSTED_MAILBOXES_BASE + (linear_id << TRUSTED_MAILBOX_SHIFT);
*((uint64_t *) mbox) = address;
flush_dcache_range(mbox, sizeof(mbox));
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to be turned on. The * Handler called when a power domain is about to be turned on. The
* level and mpidr determine the affinity instance. * level and mpidr determine the affinity instance.
******************************************************************************/ ******************************************************************************/
int32_t css_affinst_on(uint64_t mpidr, int css_pwr_domain_on(u_register_t mpidr)
uint64_t sec_entrypoint,
uint32_t afflvl,
uint32_t state)
{ {
/* /*
* SCP takes care of powering up higher affinity levels so we * SCP takes care of powering up parent power domains so we
* only need to care about level 0 * only need to care about level 0
*/ */
if (afflvl != MPIDR_AFFLVL0)
return PSCI_E_SUCCESS;
/*
* Setup mailbox with address for CPU entrypoint when it next powers up
*/
css_program_mailbox(mpidr, sec_entrypoint);
scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on, scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
scpi_power_on); scpi_power_on);
...@@ -84,47 +91,37 @@ int32_t css_affinst_on(uint64_t mpidr, ...@@ -84,47 +91,37 @@ int32_t css_affinst_on(uint64_t mpidr,
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance has just been powered on after * Handler called when a power level has just been powered on after
* being turned off earlier. The level and mpidr determine the affinity * being turned off earlier. The target_state encodes the low power state that
* instance. The 'state' arg. allows the platform to decide whether the cluster * each level has woken up from.
* was turned off prior to wakeup and do what's necessary to setup it up
* correctly.
******************************************************************************/ ******************************************************************************/
void css_affinst_on_finish(uint32_t afflvl, uint32_t state) void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{ {
unsigned long mpidr; assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* /*
* Perform the common cluster specific operations i.e enable coherency * Perform the common cluster specific operations i.e enable coherency
* if this cluster was off. * if this cluster was off.
*/ */
if (afflvl != MPIDR_AFFLVL0) if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr)); ARM_LOCAL_STATE_OFF)
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
/* Enable the gic cpu interface */ /* Enable the gic cpu interface */
arm_gic_cpuif_setup(); arm_gic_cpuif_setup();
/* todo: Is this setup only needed after a cold boot? */ /* todo: Is this setup only needed after a cold boot? */
arm_gic_pcpu_distif_setup(); arm_gic_pcpu_distif_setup();
/* Clear the mailbox for this cpu. */
css_program_mailbox(mpidr, 0);
} }
/******************************************************************************* /*******************************************************************************
* Common function called while turning a cpu off or suspending it. It is called * Common function called while turning a cpu off or suspending it. It is called
* from css_off() or css_suspend() when these functions in turn are called for * from css_off() or css_suspend() when these functions in turn are called for
* the highest affinity level which will be powered down. It performs the * power domain at the highest power level which will be powered down. It
* actions common to the OFF and SUSPEND calls. * performs the actions common to the OFF and SUSPEND calls.
******************************************************************************/ ******************************************************************************/
static void css_power_down_common(uint32_t afflvl) static void css_power_down_common(const psci_power_state_t *target_state)
{ {
uint32_t cluster_state = scpi_power_on; uint32_t cluster_state = scpi_power_on;
...@@ -132,7 +129,8 @@ static void css_power_down_common(uint32_t afflvl) ...@@ -132,7 +129,8 @@ static void css_power_down_common(uint32_t afflvl)
arm_gic_cpuif_deactivate(); arm_gic_cpuif_deactivate();
/* Cluster is to be turned off, so disable coherency */ /* Cluster is to be turned off, so disable coherency */
if (afflvl > MPIDR_AFFLVL0) { if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF) {
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr())); cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
cluster_state = scpi_power_off; cluster_state = scpi_power_off;
} }
...@@ -148,64 +146,55 @@ static void css_power_down_common(uint32_t afflvl) ...@@ -148,64 +146,55 @@ static void css_power_down_common(uint32_t afflvl)
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to be turned off. The * Handler called when a power domain is about to be turned off. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the * target_state encodes the power state that each level should transition to.
* platform to decide whether the cluster is being turned off and take
* appropriate actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It will be wise to do flush a write to the
* global to prevent unpredictable results.
******************************************************************************/ ******************************************************************************/
static void css_affinst_off(uint32_t afflvl, uint32_t state) static void css_pwr_domain_off(const psci_power_state_t *target_state)
{ {
/* Determine if any platform actions need to be executed */ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN) ARM_LOCAL_STATE_OFF);
return;
css_power_down_common(afflvl); css_power_down_common(target_state);
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to be suspended. The * Handler called when a power domain is about to be suspended. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the * target_state encodes the power state that each level should transition to.
* platform to decide whether the cluster is being turned off and take apt
* actions. The 'sec_entrypoint' determines the address in BL3-1 from where
* execution should resume.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It will be wise to do flush a write to the
* global to prevent unpredictable results.
******************************************************************************/ ******************************************************************************/
static void css_affinst_suspend(uint64_t sec_entrypoint, static void css_pwr_domain_suspend(const psci_power_state_t *target_state)
uint32_t afflvl,
uint32_t state)
{ {
/* Determine if any platform actions need to be executed */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
/* /*
* Setup mailbox with address for CPU entrypoint when it next powers up. * Juno has retention only at cpu level. Just return
* as nothing is to be done for retention.
*/ */
css_program_mailbox(read_mpidr_el1(), sec_entrypoint); if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
css_power_down_common(afflvl); css_power_down_common(target_state);
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance has just been powered on after * Handler called when a power domain has just been powered on after
* having been suspended earlier. The level and mpidr determine the affinity * having been suspended earlier. The target_state encodes the low power state
* instance. * that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure * TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher. * context. Need to implement a separate suspend finisher.
******************************************************************************/ ******************************************************************************/
static void css_affinst_suspend_finish(uint32_t afflvl, static void css_pwr_domain_suspend_finish(
uint32_t state) const psci_power_state_t *target_state)
{ {
css_affinst_on_finish(afflvl, state); /*
* Return as nothing is to be done on waking up from retention.
*/
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
css_pwr_domain_on_finish(target_state);
} }
/******************************************************************************* /*******************************************************************************
...@@ -244,12 +233,14 @@ static void __dead2 css_system_reset(void) ...@@ -244,12 +233,14 @@ static void __dead2 css_system_reset(void)
} }
/******************************************************************************* /*******************************************************************************
* Handler called when an affinity instance is about to enter standby. * Handler called when the CPU power domain is about to enter standby.
******************************************************************************/ ******************************************************************************/
void css_affinst_standby(unsigned int power_state) void css_cpu_standby(plat_local_state_t cpu_state)
{ {
unsigned int scr; unsigned int scr;
assert(cpu_state == ARM_LOCAL_STATE_RET);
scr = read_scr_el3(); scr = read_scr_el3();
/* Enable PhysicalIRQ bit for NS world to wake the CPU */ /* Enable PhysicalIRQ bit for NS world to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT); write_scr_el3(scr | SCR_IRQ_BIT);
...@@ -267,23 +258,28 @@ void css_affinst_standby(unsigned int power_state) ...@@ -267,23 +258,28 @@ void css_affinst_standby(unsigned int power_state)
/******************************************************************************* /*******************************************************************************
* Export the platform handlers to enable psci to invoke them * Export the platform handlers to enable psci to invoke them
******************************************************************************/ ******************************************************************************/
static const plat_pm_ops_t css_ops = { static const plat_psci_ops_t css_ops = {
.affinst_on = css_affinst_on, .pwr_domain_on = css_pwr_domain_on,
.affinst_on_finish = css_affinst_on_finish, .pwr_domain_on_finish = css_pwr_domain_on_finish,
.affinst_off = css_affinst_off, .pwr_domain_off = css_pwr_domain_off,
.affinst_standby = css_affinst_standby, .cpu_standby = css_cpu_standby,
.affinst_suspend = css_affinst_suspend, .pwr_domain_suspend = css_pwr_domain_suspend,
.affinst_suspend_finish = css_affinst_suspend_finish, .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
.system_off = css_system_off, .system_off = css_system_off,
.system_reset = css_system_reset, .system_reset = css_system_reset,
.validate_power_state = arm_validate_power_state .validate_power_state = arm_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint
}; };
/******************************************************************************* /*******************************************************************************
* Export the platform specific power ops. * Export the platform specific psci ops.
******************************************************************************/ ******************************************************************************/
int32_t platform_setup_pm(const plat_pm_ops_t **plat_ops) int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{ {
*plat_ops = &css_ops; *psci_ops = &css_ops;
/* Setup mailbox with entry point. */
css_program_mailbox(sec_entrypoint);
return 0; return 0;
} }
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <plat_arm.h>
/*
* On ARM platforms, by default the cluster power level is treated as the
* highest. The first entry in the power domain descriptor specifies the
* number of cluster power domains i.e. 2.
*/
#define CSS_PWR_DOMAINS_AT_MAX_PWR_LVL ARM_CLUSTER_COUNT
/*
* The CSS power domain tree descriptor. The cluster power domains are
* arranged so that when the PSCI generic code creates the power domain tree,
* the indices of the CPU power domain nodes it allocates match the linear
* indices returned by plat_core_pos_by_mpidr() i.e.
* CLUSTER1 CPUs are allocated indices from 0 to 3 and the higher indices for
* CLUSTER0 CPUs.
*/
/*
 * Power domain tree descriptor consumed by the PSCI generic layer when it
 * builds the power domain tree. Cluster 1's children are listed first so
 * that the CPU node indices allocated by the generic code match the linear
 * indices returned by plat_core_pos_by_mpidr() (see comment above).
 */
const unsigned char arm_power_domain_tree_desc[] = {
/* No of root nodes */
CSS_PWR_DOMAINS_AT_MAX_PWR_LVL,
/* No of children for the first node */
PLAT_ARM_CLUSTER1_CORE_COUNT,
/* No of children for the second node */
PLAT_ARM_CLUSTER0_CORE_COUNT
};
/******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform that allows the former to query the platform
* to convert an MPIDR to a unique linear index. An error code (-1) is
* returned in case the MPIDR is invalid.
*****************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	/* Reject out-of-range or malformed MPIDRs up front. */
	if (arm_check_mpidr(mpidr) != 0)
		return -1;

	/* Valid MPIDR: translate it to its linear core index. */
	return plat_arm_calc_core_pos(mpidr);
}
...@@ -27,7 +27,8 @@ ...@@ -27,7 +27,8 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE. * POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <assert.h>
#include <platform.h>
#include <xlat_tables.h> #include <xlat_tables.h>
/* /*
...@@ -47,3 +48,18 @@ void bl32_plat_enable_mmu(uint32_t flags) ...@@ -47,3 +48,18 @@ void bl32_plat_enable_mmu(uint32_t flags)
{ {
enable_mmu_el1(flags); enable_mmu_el1(flags);
} }
#if !ENABLE_PLAT_COMPAT
/*
 * Helper function for platform_get_core_pos() when platform compatibility is
 * disabled. This is to enable SPDs using the older platform API to continue
 * to work.
 */
unsigned int platform_core_pos_helper(unsigned long mpidr)
{
	int pos = plat_core_pos_by_mpidr(mpidr);

	/*
	 * Callers of the legacy API are expected to pass the MPIDR of a
	 * valid, present CPU, so a negative (error) result is a bug.
	 */
	assert(pos >= 0);

	return pos;
}
#endif
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <assert.h>
#include <platform.h>
#include <psci.h>
/*
* The PSCI generic code uses this API to let the platform participate in state
* coordination during a power management operation. It compares the platform
* specific local power states requested by each cpu for a given power domain
* and returns the coordinated target power state that the domain should
* enter. A platform assigns a number to a local power state. This default
* implementation assumes that the platform assigns these numbers in order of
* increasing depth of the power state i.e. for two power states X & Y, if X < Y
* then X represents a shallower power state than Y. As a result, the
* coordinated target local power state for a power domain will be the minimum
* of the requested local power states.
*/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
				const plat_local_state_t *states,
				unsigned int ncpu)
{
	plat_local_state_t coord_state = PLAT_MAX_OFF_STATE;
	unsigned int i;

	/* At least one cpu must have requested a state. */
	assert(ncpu);

	/*
	 * Lower numbers denote shallower states, so the coordinated target
	 * is simply the minimum of all the requested local states.
	 */
	for (i = 0; i < ncpu; i++) {
		if (states[i] < coord_state)
			coord_state = states[i];
	}

	return coord_state;
}
...@@ -32,37 +32,38 @@ ...@@ -32,37 +32,38 @@
#include <asm_macros.S> #include <asm_macros.S>
#include <platform_def.h> #include <platform_def.h>
.weak platform_get_core_pos
.weak platform_check_mpidr
.weak plat_report_exception .weak plat_report_exception
.weak plat_crash_console_init .weak plat_crash_console_init
.weak plat_crash_console_putc .weak plat_crash_console_putc
.weak plat_reset_handler .weak plat_reset_handler
.weak plat_disable_acp .weak plat_disable_acp
/* ----------------------------------------------------- #if !ENABLE_PLAT_COMPAT
* int platform_get_core_pos(int mpidr); .globl platform_get_core_pos
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func platform_get_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
endfunc platform_get_core_pos
/* ----------------------------------------------------- #define MPIDR_RES_BIT_MASK 0xff000000
* Placeholder function which should be redefined by
* each platform. /* ------------------------------------------------------------------
* ----------------------------------------------------- * int platform_get_core_pos(int mpidr)
* Returns the CPU index of the CPU specified by mpidr. This is
* defined when platform compatibility is disabled to enable Trusted
* Firmware components like SPD using the old platform API to work.
* This API is deprecated and it assumes that the mpidr specified is
* that of a valid and present CPU. Instead, plat_my_core_pos()
* should be used for CPU index of the current CPU and
* plat_core_pos_by_mpidr() should be used for CPU index of a
* CPU specified by its mpidr.
* ------------------------------------------------------------------
*/ */
func platform_check_mpidr func_deprecated platform_get_core_pos
mov x0, xzr bic x0, x0, #MPIDR_RES_BIT_MASK
ret mrs x1, mpidr_el1
endfunc platform_check_mpidr bic x1, x1, #MPIDR_RES_BIT_MASK
cmp x0, x1
beq plat_my_core_pos
b platform_core_pos_helper
endfunc_deprecated platform_get_core_pos
#endif
/* ----------------------------------------------------- /* -----------------------------------------------------
* Placeholder function which should be redefined by * Placeholder function which should be redefined by
......
...@@ -30,13 +30,56 @@ ...@@ -30,13 +30,56 @@
#include <arch.h> #include <arch.h>
#include <asm_macros.S> #include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h> #include <platform_def.h>
.local platform_normal_stacks .local platform_normal_stacks
.weak platform_set_stack #if ENABLE_PLAT_COMPAT
.globl plat_get_my_stack
.globl plat_set_my_stack
.weak platform_get_stack .weak platform_get_stack
.weak platform_set_stack
#else
.weak plat_get_my_stack
.weak plat_set_my_stack
.globl platform_get_stack
.globl platform_set_stack
#endif /* __ENABLE_PLAT_COMPAT__ */
#if ENABLE_PLAT_COMPAT
/* ---------------------------------------------------------------------
* When the compatility layer is enabled, the new platform APIs
* viz plat_get_my_stack() and plat_set_my_stack() need to be
* defined using the previous APIs platform_get_stack() and
* platform_set_stack(). Also we need to provide weak definitions
* of platform_get_stack() and platform_set_stack() for the platforms
* to reuse.
* --------------------------------------------------------------------
*/
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
mrs x0, mpidr_el1
b platform_get_stack
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For the current CPU, this function sets the stack
* pointer to a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
mrs x0, mpidr_el1
b platform_set_stack
endfunc plat_set_my_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr) * unsigned long platform_get_stack (unsigned long mpidr)
...@@ -65,6 +108,85 @@ func platform_set_stack ...@@ -65,6 +108,85 @@ func platform_set_stack
ret x9 ret x9
endfunc platform_set_stack endfunc platform_set_stack
#else
/* ---------------------------------------------------------------------
* When the compatility layer is disabled, the new platform APIs
* viz plat_get_my_stack() and plat_set_my_stack() are
* supported by the platform and the previous APIs platform_get_stack()
* and platform_set_stack() are defined in terms of new APIs making use
* of the fact that they are only ever invoked for the current CPU.
* This is to enable components of Trusted Firmware like SPDs using the
* old platform APIs to continue to work.
* --------------------------------------------------------------------
*/
/* -------------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr)
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory. The
* 'mpidr' should correspond to that of the current CPU.
* This function is deprecated and plat_get_my_stack()
* should be used instead.
* -------------------------------------------------------
*/
func_deprecated platform_get_stack
#if ASM_ASSERTION
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
#endif
b plat_get_my_stack
endfunc_deprecated platform_get_stack
/* -----------------------------------------------------
* void platform_set_stack (unsigned long mpidr)
*
* For the current CPU, this function sets the stack pointer
* to a stack allocated in normal memory. The
* 'mpidr' should correspond to that of the current CPU.
* This function is deprecated and plat_get_my_stack()
* should be used instead.
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
#if ASM_ASSERTION
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
#endif
b plat_set_my_stack
endfunc_deprecated platform_set_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
mov x10, x30 // lr
get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret x10
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For the current CPU, this function sets the stack
* pointer to a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
mov x9, x30 // lr
bl plat_get_my_stack
mov sp, x0
ret x9
endfunc plat_set_my_stack
#endif /*__ENABLE_PLAT_COMPAT__*/
/* ----------------------------------------------------- /* -----------------------------------------------------
* Per-cpu stacks in normal memory. Each cpu gets a * Per-cpu stacks in normal memory. Each cpu gets a
* stack of PLATFORM_STACK_SIZE bytes. * stack of PLATFORM_STACK_SIZE bytes.
......
...@@ -34,35 +34,63 @@ ...@@ -34,35 +34,63 @@
.local platform_normal_stacks .local platform_normal_stacks
.globl plat_set_my_stack
.globl plat_get_my_stack
.globl platform_set_stack .globl platform_set_stack
.globl platform_get_stack .globl platform_get_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long) * unsigned long plat_get_my_stack ()
* *
* For cold-boot BL images, only the primary CPU needs a * For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a * stack. This function returns the stack pointer for a
* stack allocated in device memory. * stack allocated in device memory.
* ----------------------------------------------------- * -----------------------------------------------------
*/ */
func platform_get_stack func plat_get_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret ret
endfunc platform_get_stack endfunc plat_get_my_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* void platform_set_stack (unsigned long) * void plat_set_my_stack ()
* *
* For cold-boot BL images, only the primary CPU needs a * For cold-boot BL images, only the primary CPU needs a
* stack. This function sets the stack pointer to a stack * stack. This function sets the stack pointer to a stack
* allocated in normal memory. * allocated in normal memory.
* ----------------------------------------------------- * -----------------------------------------------------
*/ */
func platform_set_stack func plat_set_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
mov sp, x0 mov sp, x0
ret ret
endfunc platform_set_stack endfunc plat_set_my_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a
* stack allocated in device memory. This function
* is deprecated.
* -----------------------------------------------------
*/
func_deprecated platform_get_stack
b plat_get_my_stack
endfunc_deprecated platform_get_stack
/* -----------------------------------------------------
* void platform_set_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function sets the stack pointer to a stack
* allocated in normal memory.This function is
* deprecated.
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
b plat_set_my_stack
endfunc_deprecated platform_set_stack
/* ----------------------------------------------------- /* -----------------------------------------------------
* Single cpu stack in normal memory. * Single cpu stack in normal memory.
......
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
.globl plat_my_core_pos
.globl plat_is_my_cpu_primary
.globl plat_get_my_entrypoint
.weak platform_get_core_pos
/* -----------------------------------------------------
* Compatibility wrappers for new platform APIs.
* -----------------------------------------------------
*/
/* ------------------------------------------------------------------
 * unsigned int plat_my_core_pos(void);
 * Compatibility wrapper: reads this CPU's MPIDR and tail-calls the
 * legacy platform_get_core_pos() to obtain the linear core index.
 * Clobbers x0 (plus whatever the callee clobbers).
 * ------------------------------------------------------------------
 */
func plat_my_core_pos
mrs x0, mpidr_el1
b platform_get_core_pos
endfunc plat_my_core_pos
/* ------------------------------------------------------------------
 * unsigned int plat_is_my_cpu_primary(void);
 * Compatibility wrapper: passes this CPU's MPIDR to the legacy
 * platform_is_primary_cpu() via tail-call; the callee's return value
 * becomes this function's return value. Clobbers x0.
 * ------------------------------------------------------------------
 */
func plat_is_my_cpu_primary
mrs x0, mpidr_el1
b platform_is_primary_cpu
endfunc plat_is_my_cpu_primary
/* ------------------------------------------------------------------
 * uintptr_t plat_get_my_entrypoint(void);
 * Compatibility wrapper: passes this CPU's MPIDR to the legacy
 * platform_get_entrypoint() via tail-call. Clobbers x0 (plus
 * whatever the callee clobbers).
 * ------------------------------------------------------------------
 */
func plat_get_my_entrypoint
mrs x0, mpidr_el1
b platform_get_entrypoint
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* int platform_get_core_pos(int mpidr);
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func platform_get_core_pos
/* x1 = CPU id (Aff0 field of the mpidr argument in x0) */
and x1, x0, #MPIDR_CPU_MASK
/* x0 = cluster field (Aff1 bits) of the mpidr */
and x0, x0, #MPIDR_CLUSTER_MASK
/* Cluster field sits at bit 8, so LSR #6 yields ClusterId * 4;
 * result: CorePos = (ClusterId * 4) + CoreId. Clobbers x0, x1. */
add x0, x1, x0, LSR #6
ret
endfunc platform_get_core_pos
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The PSCI platform-compatibility layer cannot be used together with the
# extended state-ID format: fail the build early if both are requested.
ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
$(error "PSCI Compatibility mode can be enabled only if \
PSCI_EXTENDED_STATE_ID is not set")
endif
# Assembly shims mapping the new platform API onto the legacy one,
# linked into every BL image.
PLAT_BL_COMMON_SOURCES += plat/compat/aarch64/plat_helpers_compat.S
# C compatibility glue for power management and topology, BL31 only.
BL31_SOURCES += plat/common/aarch64/plat_psci_common.c \
plat/compat/plat_pm_compat.c \
plat/compat/plat_topology_compat.c
This diff is collapsed.
This diff is collapsed.
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -80,7 +80,6 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id, ...@@ -80,7 +80,6 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
void *cookie) void *cookie)
{ {
uint32_t linear_id; uint32_t linear_id;
uint64_t mpidr;
optee_context_t *optee_ctx; optee_context_t *optee_ctx;
/* Check the security state when the exception was generated */ /* Check the security state when the exception was generated */
...@@ -92,14 +91,13 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id, ...@@ -92,14 +91,13 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
#endif #endif
/* Sanity check the pointer to this cpu's context */ /* Sanity check the pointer to this cpu's context */
mpidr = read_mpidr();
assert(handle == cm_get_context(NON_SECURE)); assert(handle == cm_get_context(NON_SECURE));
/* Save the non-secure context before entering the OPTEE */ /* Save the non-secure context before entering the OPTEE */
cm_el1_sysregs_context_save(NON_SECURE); cm_el1_sysregs_context_save(NON_SECURE);
/* Get a reference to this cpu's OPTEE context */ /* Get a reference to this cpu's OPTEE context */
linear_id = platform_get_core_pos(mpidr); linear_id = plat_my_core_pos();
optee_ctx = &opteed_sp_context[linear_id]; optee_ctx = &opteed_sp_context[linear_id];
assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE)); assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
...@@ -125,10 +123,9 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id, ...@@ -125,10 +123,9 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
int32_t opteed_setup(void) int32_t opteed_setup(void)
{ {
entry_point_info_t *optee_ep_info; entry_point_info_t *optee_ep_info;
uint64_t mpidr = read_mpidr();
uint32_t linear_id; uint32_t linear_id;
linear_id = platform_get_core_pos(mpidr); linear_id = plat_my_core_pos();
/* /*
* Get information about the Secure Payload (BL32) image. Its * Get information about the Secure Payload (BL32) image. Its
...@@ -182,8 +179,7 @@ int32_t opteed_setup(void) ...@@ -182,8 +179,7 @@ int32_t opteed_setup(void)
******************************************************************************/ ******************************************************************************/
static int32_t opteed_init(void) static int32_t opteed_init(void)
{ {
uint64_t mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
entry_point_info_t *optee_entry_point; entry_point_info_t *optee_entry_point;
uint64_t rc; uint64_t rc;
...@@ -195,7 +191,7 @@ static int32_t opteed_init(void) ...@@ -195,7 +191,7 @@ static int32_t opteed_init(void)
optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE); optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(optee_entry_point); assert(optee_entry_point);
cm_init_context(mpidr, optee_entry_point); cm_init_my_context(optee_entry_point);
/* /*
* Arrange for an entry into OPTEE. It will be returned via * Arrange for an entry into OPTEE. It will be returned via
...@@ -226,8 +222,7 @@ uint64_t opteed_smc_handler(uint32_t smc_fid, ...@@ -226,8 +222,7 @@ uint64_t opteed_smc_handler(uint32_t smc_fid,
uint64_t flags) uint64_t flags)
{ {
cpu_context_t *ns_cpu_context; cpu_context_t *ns_cpu_context;
unsigned long mpidr = read_mpidr(); uint32_t linear_id = plat_my_core_pos();
uint32_t linear_id = platform_get_core_pos(mpidr);
optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
uint64_t rc; uint64_t rc;
......
This diff is collapsed.
...@@ -121,7 +121,6 @@ int32_t tlkd_setup(void) ...@@ -121,7 +121,6 @@ int32_t tlkd_setup(void)
******************************************************************************/ ******************************************************************************/
int32_t tlkd_init(void) int32_t tlkd_init(void)
{ {
uint64_t mpidr = read_mpidr();
entry_point_info_t *tlk_entry_point; entry_point_info_t *tlk_entry_point;
/* /*
...@@ -131,7 +130,7 @@ int32_t tlkd_init(void) ...@@ -131,7 +130,7 @@ int32_t tlkd_init(void)
tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE); tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(tlk_entry_point); assert(tlk_entry_point);
cm_init_context(mpidr, tlk_entry_point); cm_init_my_context(tlk_entry_point);
/* /*
* Arrange for an entry into the test secure payload. * Arrange for an entry into the test secure payload.
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment