Commit 432b9905 authored by Achin Gupta's avatar Achin Gupta

Merge pull request #361 from achingupta/for_sm/psci_proto_v5

For sm/psci proto v5
parents 9caf7e36 9d070b99
......@@ -164,8 +164,7 @@
* then it means it is a warm boot so jump to this address.
* -------------------------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_get_entrypoint
bl plat_get_my_entrypoint
cbz x0, do_cold_boot
br x0
......@@ -181,9 +180,8 @@
* of that state and allows entry into the OS.
* -------------------------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_is_primary_cpu
cbnz x0, do_primary_cold_boot
bl plat_is_my_cpu_primary
cbnz w0, do_primary_cold_boot
/* This is a cold boot on a secondary CPU */
bl plat_secondary_cold_boot_setup
......@@ -249,8 +247,7 @@
* moment.
* ---------------------------------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
bl plat_set_my_stack
.endm
#endif /* __EL3_COMMON_MACROS_S__ */
......@@ -30,6 +30,7 @@
#ifndef __ARM_DEF_H__
#define __ARM_DEF_H__
#include <arch.h>
#include <common_def.h>
#include <platform_def.h>
#include <tbbr_img_def.h>
......@@ -47,6 +48,25 @@
#define ARM_CACHE_WRITEBACK_SHIFT 6
/*
* Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The
* power levels have a 1:1 mapping with the MPIDR affinity levels.
*/
#define ARM_PWR_LVL0 MPIDR_AFFLVL0
#define ARM_PWR_LVL1 MPIDR_AFFLVL1
/*
* Macros for local power states in ARM platforms encoded by State-ID field
* within the power-state parameter.
*/
/* Local power state for power domains in Run state. */
#define ARM_LOCAL_STATE_RUN 0
/* Local power state for retention. Valid only for CPU power domains */
#define ARM_LOCAL_STATE_RET 1
/* Local power state for OFF/power-down. Valid for CPU and cluster power
domains */
#define ARM_LOCAL_STATE_OFF 2
/* Memory location options for TSP */
#define ARM_TRUSTED_SRAM_ID 0
#define ARM_TRUSTED_DRAM_ID 1
......@@ -163,9 +183,22 @@
#define ADDR_SPACE_SIZE (1ull << 32)
#define PLATFORM_NUM_AFFS (ARM_CLUSTER_COUNT + \
#define PLAT_NUM_PWR_DOMAINS (ARM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
#define PLAT_MAX_PWR_LVL ARM_PWR_LVL1
/*
* This macro defines the deepest retention state possible. A higher state
* id will represent an invalid or a power down state.
*/
#define PLAT_MAX_RET_STATE ARM_LOCAL_STATE_RET
/*
* This macro defines the deepest power down states possible. Any state ID
* higher than this is invalid.
*/
#define PLAT_MAX_OFF_STATE ARM_LOCAL_STATE_OFF
#define PLATFORM_CORE_COUNT (PLAT_ARM_CLUSTER0_CORE_COUNT + \
PLAT_ARM_CLUSTER1_CORE_COUNT)
......@@ -148,6 +148,35 @@ CASSERT(PLAT_PCPU_DATA_SIZE == sizeof(arm_cpu_data_t),
#endif /* IMAGE_BL31 */
#if ARM_RECOM_STATE_ID_ENC
/*
* Macros used to parse state information from State-ID if it is using the
* recommended encoding for State-ID.
*/
#define ARM_LOCAL_PSTATE_WIDTH 4
#define ARM_LOCAL_PSTATE_MASK ((1 << ARM_LOCAL_PSTATE_WIDTH) - 1)
/* Macros to construct the composite power state */
/* Make composite power state parameter till power level 0 */
#if PSCI_EXTENDED_STATE_ID
#define arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
#else
#define arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
(((lvl0_state) << PSTATE_ID_SHIFT) | \
((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
((type) << PSTATE_TYPE_SHIFT))
#endif /* __PSCI_EXTENDED_STATE_ID__ */
/* Make composite power state parameter till power level 1 */
#define arm_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
(((lvl1_state) << ARM_LOCAL_PSTATE_WIDTH) | \
arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
#endif /* __ARM_RECOM_STATE_ID_ENC__ */
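To make the encoding concrete, here is a worked expansion of these macros for the deepest state used later in this change (cluster and CPU power down, State-ID 0x22). This is a sketch only: the PSTATE_* shift values and PSTATE_TYPE_POWERDOWN = 1 are assumed from the usual psci.h definitions and are not shown in this diff.
/*
 * Illustrative expansion (assumes PSTATE_ID_SHIFT = 0, PSTATE_TYPE_SHIFT = 16
 * and PSTATE_PWR_LVL_SHIFT = 24 for the original format, PSTATE_TYPE_SHIFT =
 * 30 for the extended format):
 *
 *   arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
 *                          ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN)
 *     = (2 << 4) | 2 | (1 << 24) | (1 << 16) = 0x01010022  (original format)
 *     = (2 << 4) | 2 | (1 << 30)             = 0x40000022  (extended format)
 */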
/* CCI utility functions */
void arm_cci_init(void);
......@@ -159,8 +188,12 @@ void arm_io_setup(void);
void arm_tzc_setup(void);
/* PM utility functions */
int32_t arm_do_affinst_actions(unsigned int afflvl, unsigned int state);
int arm_validate_power_state(unsigned int power_state);
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state);
int arm_validate_ns_entrypoint(uintptr_t entrypoint);
/* Topology utility function */
int arm_check_mpidr(u_register_t mpidr);
/* BL1 utility functions */
void arm_bl1_early_platform_setup(void);
......@@ -199,7 +232,7 @@ int plat_arm_get_alt_image_source(
unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
void plat_arm_topology_setup(void);
unsigned int plat_arm_calc_core_pos(u_register_t mpidr);
#endif /* __PLAT_ARM_H__ */
......@@ -39,8 +39,7 @@
*************************************************************************/
#define MHU_PAYLOAD_CACHED 0
#define TRUSTED_MAILBOXES_BASE ARM_TRUSTED_SRAM_BASE
#define TRUSTED_MAILBOX_SHIFT 4
#define TRUSTED_MAILBOX_BASE ARM_TRUSTED_SRAM_BASE
#define NSROM_BASE 0x1f000000
#define NSROM_SIZE 0x00001000
......@@ -67,6 +67,17 @@
#define MAKE_ULL(x) x
#endif
/*
* Macros to wrap declarations of deprecated APIs within Trusted Firmware.
* The callers of these APIs will continue to compile as long as the build
flag WARN_DEPRECATED is zero. Otherwise, the compiler will emit a warning
* when the callers of these APIs are compiled.
*/
#if WARN_DEPRECATED
#define __warn_deprecated __attribute__ ((deprecated))
#else
#define __warn_deprecated
#endif
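As a usage sketch (the declaration below is hypothetical, not part of this change), a legacy interface would be marked with this macro so that, when WARN_DEPRECATED is non-zero, GCC's deprecated attribute flags every call site:
/* Hypothetical example: callers of this legacy API get a compile-time
 * warning (-Wdeprecated-declarations) when WARN_DEPRECATED is non-zero. */
unsigned int legacy_platform_api(unsigned long mpidr) __warn_deprecated;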
#endif /* __COMMON_DEF_H__ */
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -31,13 +31,14 @@
#ifndef __PLATFORM_H__
#define __PLATFORM_H__
#include <psci.h>
#include <stdint.h>
#include <types.h>
/*******************************************************************************
* Forward declarations
******************************************************************************/
struct plat_pm_ops;
struct meminfo;
struct image_info;
struct entry_point_info;
......@@ -59,6 +60,8 @@ int plat_get_image_source(unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
unsigned long plat_get_ns_image_entrypoint(void);
unsigned int plat_my_core_pos(void);
int plat_core_pos_by_mpidr(u_register_t mpidr);
/*******************************************************************************
* Mandatory interrupt management functions
......@@ -74,8 +77,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned int platform_get_core_pos(unsigned long mpidr);
unsigned long platform_get_stack(unsigned long mpidr);
unsigned long plat_get_my_stack(void);
void plat_report_exception(unsigned long);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
......@@ -181,9 +183,16 @@ struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type);
/*******************************************************************************
* Mandatory PSCI functions (BL3-1)
******************************************************************************/
int platform_setup_pm(const struct plat_pm_ops **);
unsigned int plat_get_aff_count(unsigned int, unsigned long);
unsigned int plat_get_aff_state(unsigned int, unsigned long);
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const struct plat_psci_ops **);
const unsigned char *plat_get_power_domain_tree_desc(void);
/*******************************************************************************
* Optional PSCI functions (BL3-1).
******************************************************************************/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu);
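The sketch below shows one straightforward coordination policy a platform could implement for this optional hook. It is an illustration under stated assumptions (types from psci.h, PLAT_MAX_OFF_STATE from the platform port), not necessarily the default implementation shipped in plat_psci_common.c.
/*
 * Example policy: a shared power domain can only go as deep as the shallowest
 * state requested by any CPU within it, so return the minimum local state.
 */
plat_local_state_t example_get_target_pwr_state(unsigned int lvl,
						const plat_local_state_t *states,
						unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE;

	while (ncpu--) {
		if (states[ncpu] < target)
			target = states[ncpu];
	}

	return target;
}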
/*******************************************************************************
* Optional BL3-1 functions (may be overridden)
......@@ -201,4 +210,31 @@ void bl32_plat_enable_mmu(uint32_t flags);
int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
unsigned int *flags);
#if ENABLE_PLAT_COMPAT
/*
* The below declarations are to enable compatibility for the platform ports
* using the old platform interface.
*/
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned int platform_get_core_pos(unsigned long mpidr);
/*******************************************************************************
* Mandatory PSCI Compatibility functions (BL3-1)
******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **);
unsigned int plat_get_aff_count(unsigned int, unsigned long);
unsigned int plat_get_aff_state(unsigned int, unsigned long);
#else
/*
The below function enables Trusted Firmware components like SPDs which
* haven't migrated to the new platform API to compile on platforms which
* have the compatibility layer disabled.
*/
unsigned int platform_get_core_pos(unsigned long mpidr) __warn_deprecated;
#endif /* __ENABLE_PLAT_COMPAT__ */
#endif /* __PLATFORM_H__ */
......@@ -128,7 +128,7 @@ void bakery_lock_get(bakery_lock_t *bakery)
unsigned int my_ticket, my_prio, their_ticket;
unsigned int their_bakery_data;
me = platform_get_core_pos(read_mpidr_el1());
me = plat_my_core_pos();
assert_bakery_entry_valid(me, bakery);
......@@ -174,7 +174,7 @@ void bakery_lock_get(bakery_lock_t *bakery)
/* Release the lock and signal contenders */
void bakery_lock_release(bakery_lock_t *bakery)
{
unsigned int me = platform_get_core_pos(read_mpidr_el1());
unsigned int me = plat_my_core_pos();
assert_bakery_entry_valid(me, bakery);
assert(bakery_ticket_number(bakery->lock_data[me]));
......@@ -148,7 +148,7 @@ void bakery_lock_get(unsigned int id, unsigned int offset)
bakery_info_t *their_bakery_info;
unsigned int their_bakery_data;
me = platform_get_core_pos(read_mpidr_el1());
me = plat_my_core_pos();
is_cached = read_sctlr_el3() & SCTLR_C_BIT;
......@@ -37,9 +37,8 @@
#include "../fvp_def.h"
.globl plat_secondary_cold_boot_setup
.globl platform_get_entrypoint
.globl platform_mem_init
.globl platform_is_primary_cpu
.globl plat_get_my_entrypoint
.globl plat_is_my_cpu_primary
.macro fvp_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =V2M_SYSREGS_BASE + V2M_SYS_ID
......@@ -96,30 +95,31 @@ cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
/* -----------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid);
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between
* a cold and warm boot.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
* Main job of this routine is to distinguish between a cold and warm
* boot. On FVP, this information can be queried from the power
* controller. The Power Control SYS Status Register (PSYSR) indicates
* the wake-up reason for the CPU.
*
* For a cold boot, return 0.
* For a warm boot, read the mailbox and return the address it contains.
*
* TODO: Not a good idea to save lr in a temp reg
* TODO: PSYSR is a common register and should be
* accessed using locks. Since it's not possible
* to use locks immediately after a cold reset
* we are relying on the fact that after a cold
* reset all cpus will read the same WK field
* -----------------------------------------------------
* ---------------------------------------------------------------------
*/
func platform_get_entrypoint
mov x9, x30 // lr
mov x2, x0
func plat_get_my_entrypoint
/* ---------------------------------------------------------------------
* When bit PSYSR.WK indicates either "Wake by PPONR" or "Wake by GIC
* WakeRequest signal" then it is a warm boot.
* ---------------------------------------------------------------------
*/
mrs x2, mpidr_el1
ldr x1, =PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
......@@ -128,53 +128,43 @@ func platform_get_entrypoint
beq warm_reset
cmp w2, #WKUP_GICREQ
beq warm_reset
/* Cold reset */
mov x0, #0
b exit
ret
warm_reset:
/* ---------------------------------------------
* A per-cpu mailbox is maintained in the tru-
* sted DRAM. Its flushed out of the caches
* after every update using normal memory so
* its safe to read it here with SO attributes
* ---------------------------------------------
/* ---------------------------------------------------------------------
* A mailbox is maintained in the trusted SRAM. It is flushed out of the
* caches after every update using normal memory so it is safe to read
* it here with SO attributes.
* ---------------------------------------------------------------------
*/
ldr x10, =MBOX_BASE
bl platform_get_core_pos
lsl x0, x0, #ARM_CACHE_WRITEBACK_SHIFT
ldr x0, [x10, x0]
mov_imm x0, MBOX_BASE
ldr x0, [x0]
cbz x0, _panic
exit:
ret x9
_panic: b _panic
endfunc platform_get_entrypoint
ret
/* ---------------------------------------------------------------------
* The power controller indicates this is a warm reset but the mailbox
* is empty. This should never happen!
* ---------------------------------------------------------------------
*/
_panic:
b _panic
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* void platform_mem_init (void);
* unsigned int plat_is_my_cpu_primary (void);
*
* Zero out the mailbox registers in the shared memory.
* The mmu is turned off right now and only the primary can
* ever execute this code. Secondaries will read the
* mailboxes using SO accesses. In short, BL31 will
* update the mailboxes after mapping the tzdram as
* normal memory. It will flush its copy after update.
* BL1 will always read the mailboxes with the MMU off
* Find out whether the current cpu is the primary
* cpu.
* -----------------------------------------------------
*/
func platform_mem_init
ldr x0, =MBOX_BASE
mov w1, #PLATFORM_CORE_COUNT
loop:
str xzr, [x0], #CACHE_WRITEBACK_GRANULE
subs w1, w1, #1
b.gt loop
ret
endfunc platform_mem_init
func platform_is_primary_cpu
func plat_is_my_cpu_primary
mrs x0, mpidr_el1
and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
cmp x0, #FVP_PRIMARY_CPU
cset x0, eq
cset w0, eq
ret
endfunc platform_is_primary_cpu
endfunc plat_is_my_cpu_primary
......@@ -139,7 +139,6 @@
/* Entrypoint mailboxes */
#define MBOX_BASE ARM_SHARED_RAM_BASE
#define MBOX_SIZE 0x200
#endif /* __FVP_DEF_H__ */
......@@ -44,24 +44,36 @@
#include "fvp_private.h"
typedef volatile struct mailbox {
unsigned long value __aligned(CACHE_WRITEBACK_GRANULE);
} mailbox_t;
#if ARM_RECOM_STATE_ID_ENC
/*
* The table storing the valid idle power states. Ensure that the
* array entries are populated in ascending order of state-id to
* enable us to use binary search during power state validation.
* The table must be terminated by a NULL entry.
*/
const unsigned int arm_pm_idle_states[] = {
/* State-id - 0x01 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RET,
ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
/* State-id - 0x02 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
/* State-id - 0x22 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
0,
};
#endif
/*******************************************************************************
* Private FVP function to program the mailbox for a cpu before it is released
* from reset.
******************************************************************************/
static void fvp_program_mailbox(uint64_t mpidr, uint64_t address)
static void fvp_program_mailbox(uintptr_t address)
{
uint64_t linear_id;
mailbox_t *fvp_mboxes;
linear_id = platform_get_core_pos(mpidr);
fvp_mboxes = (mailbox_t *)MBOX_BASE;
fvp_mboxes[linear_id].value = address;
flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
sizeof(unsigned long));
uintptr_t *mailbox = (void *) MBOX_BASE;
*mailbox = address;
flush_dcache_range((uintptr_t) mailbox, sizeof(*mailbox));
}
/*******************************************************************************
......@@ -93,10 +105,13 @@ static void fvp_cluster_pwrdwn_common(void)
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to enter standby.
* FVP handler called when a CPU is about to enter standby.
******************************************************************************/
void fvp_affinst_standby(unsigned int power_state)
void fvp_cpu_standby(plat_local_state_t cpu_state)
{
assert(cpu_state == ARM_LOCAL_STATE_RET);
/*
* Enter standby state
* dsb is good practice before using wfi to enter low power states
......@@ -106,24 +121,14 @@ void fvp_affinst_standby(unsigned int power_state)
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be turned on. The
* level and mpidr determine the affinity instance.
* FVP handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
int fvp_affinst_on(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state)
int fvp_pwr_domain_on(u_register_t mpidr)
{
int rc = PSCI_E_SUCCESS;
unsigned int psysr;
/*
* It's possible to turn on only affinity level 0 i.e. a cpu
* on the FVP. Ignore any other affinity level.
*/
if (afflvl != MPIDR_AFFLVL0)
return rc;
/*
* Ensure that we do not cancel an inflight power off request
* for the target cpu. That would leave it in a zombie wfi.
......@@ -135,69 +140,54 @@ int fvp_affinst_on(unsigned long mpidr,
psysr = fvp_pwrc_read_psysr(mpidr);
} while (psysr & PSYSR_AFF_L0);
fvp_program_mailbox(mpidr, sec_entrypoint);
fvp_pwrc_write_pponr(mpidr);
return rc;
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be turned off. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take apt
* actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
global variables across calls. It would be wise to flush a write to the
* global to prevent unpredictable results.
* FVP handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void fvp_affinst_off(unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_off(const psci_power_state_t *target_state)
{
/* Determine if any platform actions need to be executed */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/*
* If execution reaches this stage then this affinity level will be
* suspended. Perform at least the cpu specific actions followed the
* cluster specific operations if applicable.
* If execution reaches this stage then this power domain will be
* suspended. Perform at least the cpu specific actions followed
* by the cluster specific operations if applicable.
*/
fvp_cpu_pwrdwn_common();
if (afflvl != MPIDR_AFFLVL0)
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF)
fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be suspended. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take apt
* actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
global variables across calls. It would be wise to flush a write to the
* global to prevent unpredictable results.
* FVP handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void fvp_affinst_suspend(unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
unsigned long mpidr;
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
/*
* FVP has retention only at cpu level. Just return
* as nothing is to be done for retention.
*/
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* Program the jump address for the this cpu */
fvp_program_mailbox(mpidr, sec_entrypoint);
/* Program the power controller to enable wakeup interrupts. */
fvp_pwrc_set_wen(mpidr);
......@@ -205,31 +195,29 @@ void fvp_affinst_suspend(unsigned long sec_entrypoint,
fvp_cpu_pwrdwn_common();
/* Perform the common cluster specific operations */
if (afflvl != MPIDR_AFFLVL0)
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF)
fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
* FVP handler called when an affinity instance has just been powered on after
* being turned off earlier. The level and mpidr determine the affinity
* instance. The 'state' arg. allows the platform to decide whether the cluster
* was turned off prior to wakeup and do what's necessary to setup it up
* correctly.
* FVP handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
void fvp_affinst_on_finish(unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
unsigned long mpidr;
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* Perform the common cluster specific operations */
if (afflvl != MPIDR_AFFLVL0) {
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF) {
/*
* This CPU might have woken up whilst the cluster was
* attempting to power down. In this case the FVP power
......@@ -251,9 +239,6 @@ void fvp_affinst_on_finish(unsigned int afflvl,
*/
fvp_pwrc_clr_wen(mpidr);
/* Zero the jump address in the mailbox for this cpu */
fvp_program_mailbox(mpidr, 0);
/* Enable the gic cpu interface */
arm_gic_cpuif_setup();
......@@ -262,16 +247,22 @@ void fvp_affinst_on_finish(unsigned int afflvl,
}
/*******************************************************************************
* FVP handler called when an affinity instance has just been powered on after
* having been suspended earlier. The level and mpidr determine the affinity
* instance.
* FVP handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
void fvp_affinst_suspend_finish(unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
fvp_affinst_on_finish(afflvl, state);
/*
* Nothing to be done on waking up from retention from CPU level.
*/
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
fvp_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
......@@ -304,23 +295,28 @@ static void __dead2 fvp_system_reset(void)
/*******************************************************************************
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
static const plat_pm_ops_t fvp_plat_pm_ops = {
.affinst_standby = fvp_affinst_standby,
.affinst_on = fvp_affinst_on,
.affinst_off = fvp_affinst_off,
.affinst_suspend = fvp_affinst_suspend,
.affinst_on_finish = fvp_affinst_on_finish,
.affinst_suspend_finish = fvp_affinst_suspend_finish,
static const plat_psci_ops_t fvp_plat_psci_ops = {
.cpu_standby = fvp_cpu_standby,
.pwr_domain_on = fvp_pwr_domain_on,
.pwr_domain_off = fvp_pwr_domain_off,
.pwr_domain_suspend = fvp_pwr_domain_suspend,
.pwr_domain_on_finish = fvp_pwr_domain_on_finish,
.pwr_domain_suspend_finish = fvp_pwr_domain_suspend_finish,
.system_off = fvp_system_off,
.system_reset = fvp_system_reset,
.validate_power_state = arm_validate_power_state
.validate_power_state = arm_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint
};
/*******************************************************************************
* Export the platform specific power ops & initialize the fvp power controller
* Export the platform specific psci ops & initialize the fvp power controller
******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*plat_ops = &fvp_plat_pm_ops;
*psci_ops = &fvp_plat_psci_ops;
/* Program the jump address */
fvp_program_mailbox(sec_entrypoint);
return 0;
}
......@@ -29,204 +29,41 @@
*/
#include <arch.h>
#include <assert.h>
#include <plat_arm.h>
#include <platform_def.h>
/* TODO: Reusing psci error codes & state information. Get our own! */
#include <psci.h>
#include "drivers/pwrc/fvp_pwrc.h"
#include "fvp_def.h"
/* We treat '255' as an invalid affinity instance */
#define AFFINST_INVAL 0xff
/*******************************************************************************
* We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each
* flavour has a different topology. The common bit is that there can be a max.
* of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define
* a tree like data structure which caters to these maximum bounds. It simply
* marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no
* cluster 1 on the Foundation FVP. The 'data' field is currently unused.
******************************************************************************/
typedef struct affinity_info {
unsigned char sibling;
unsigned char child;
unsigned char state;
unsigned int data;
} affinity_info_t;
/*******************************************************************************
* The following two data structures store the topology tree for the fvp. There
* is a separate array for each affinity level i.e. cpus and clusters. The child
* and sibling references allow traversal inside and in between the two arrays.
******************************************************************************/
static affinity_info_t fvp_aff1_topology_map[ARM_CLUSTER_COUNT];
static affinity_info_t fvp_aff0_topology_map[PLATFORM_CORE_COUNT];
/* Simple global variable to safeguard us from stupidity */
static unsigned int topology_setup_done;
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform to allow the former to detect the platform
* topology. psci queries the platform to determine how many affinity instances
* are present at a particular level for a given mpidr e.g. consider a dual
* cluster platform where each cluster has 4 cpus. A call to this function with
* (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4.
* Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters.
This is because we are effectively asking how many affinity level 1 instances
* are implemented under affinity level 2 instance 0.
******************************************************************************/
unsigned int plat_get_aff_count(unsigned int aff_lvl,
unsigned long mpidr)
{
unsigned int aff_count = 1, ctr;
unsigned char parent_aff_id;
assert(topology_setup_done == 1);
switch (aff_lvl) {
case 3:
case 2:
/*
* Assert if the parent affinity instance is not 0.
* This also takes care of level 3 in an obfuscated way
*/
parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id == 0);
/*
* Report that we implement a single instance of
* affinity levels 2 & 3 which are AFF_ABSENT
*/
break;
case 1:
/* Assert if the parent affinity instance is not 0. */
parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id == 0);
/* Fetch the starting index in the aff1 array */
for (ctr = 0;
fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
ctr = fvp_aff1_topology_map[ctr].sibling) {
aff_count++;
}
break;
case 0:
/* Assert if the cluster id is anything apart from 0 or 1 */
parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id < ARM_CLUSTER_COUNT);
/* Fetch the starting index in the aff0 array */
for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
ctr = fvp_aff0_topology_map[ctr].sibling) {
aff_count++;
}
break;
default:
assert(0);
}
return aff_count;
}
/*
* The FVP power domain tree does not have a single system level power domain
* i.e. a single root node. The first entry in the power domain descriptor
* specifies the number of power domains at the highest power level. For the FVP
* this is 2 i.e. the number of cluster power domains.
*/
#define FVP_PWR_DOMAINS_AT_MAX_PWR_LVL ARM_CLUSTER_COUNT
/* The FVP power domain tree descriptor */
const unsigned char arm_power_domain_tree_desc[] = {
/* No of root nodes */
FVP_PWR_DOMAINS_AT_MAX_PWR_LVL,
/* No of children for the first node */
PLAT_ARM_CLUSTER0_CORE_COUNT,
/* No of children for the second node */
PLAT_ARM_CLUSTER1_CORE_COUNT
};
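For comparison, a hypothetical single-cluster platform with four CPUs would describe its tree in the same format (the identifier below is made up for illustration):
/* Hypothetical descriptor: one root (cluster) node with four CPU children. */
static const unsigned char example_power_domain_tree_desc[] = {
	1,	/* Number of cluster power domains at the highest level */
	4	/* Number of CPU children of cluster 0 */
};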
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform to allow the former to detect the state of a
* affinity instance in the platform topology. psci queries the platform to
* determine whether an affinity instance is present or absent. This caters for
* topologies where an intermediate affinity level instance is missing e.g.
* consider a platform which implements a single cluster with 4 cpus and there
* is another cpu sitting directly on the interconnect along with the cluster.
* The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
* cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
* 1 is however missing but needs to be accounted to reach this single cpu in
* the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
* applicable to the FVP but depicted as an example.
******************************************************************************/
unsigned int plat_get_aff_state(unsigned int aff_lvl,
unsigned long mpidr)
{
unsigned int aff_state = PSCI_AFF_ABSENT, idx;
idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(topology_setup_done == 1);
switch (aff_lvl) {
case 3:
case 2:
/* Report affinity levels 2 & 3 as absent */
break;
case 1:
aff_state = fvp_aff1_topology_map[idx].state;
break;
case 0:
/*
* First get start index of the aff0 in its array & then add
* to it the affinity id that we want the state of
*/
idx = fvp_aff1_topology_map[idx].child;
idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
aff_state = fvp_aff0_topology_map[idx].state;
break;
default:
assert(0);
}
return aff_state;
}
/*******************************************************************************
* This function populates the FVP specific topology information depending upon
* the FVP flavour it's running on. We construct all the mpidrs we can handle
* and rely on the PWRC.PSYSR to flag absent cpus when their status is queried.
* generic layer and the platform that allows the former to query the platform
* to convert an MPIDR to a unique linear index. An error code (-1) is returned
* in case the MPIDR is invalid.
******************************************************************************/
void plat_arm_topology_setup(void)
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned char aff0, aff1, aff_state, aff0_offset = 0;
unsigned long mpidr;
topology_setup_done = 0;
for (aff1 = 0; aff1 < ARM_CLUSTER_COUNT; aff1++) {
fvp_aff1_topology_map[aff1].child = aff0_offset;
fvp_aff1_topology_map[aff1].sibling = aff1 + 1;
for (aff0 = 0; aff0 < FVP_MAX_CPUS_PER_CLUSTER; aff0++) {
mpidr = aff1 << MPIDR_AFF1_SHIFT;
mpidr |= aff0 << MPIDR_AFF0_SHIFT;
if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) {
/*
* Presence of even a single aff0 indicates
* presence of parent aff1 on the FVP.
*/
aff_state = PSCI_AFF_PRESENT;
fvp_aff1_topology_map[aff1].state =
PSCI_AFF_PRESENT;
} else {
aff_state = PSCI_AFF_ABSENT;
}
fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL;
fvp_aff0_topology_map[aff0_offset].state = aff_state;
fvp_aff0_topology_map[aff0_offset].sibling =
aff0_offset + 1;
/* Increment the absolute number of aff0s traversed */
aff0_offset++;
}
/* Tie-off the last aff0 sibling to -1 to avoid overflow */
fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL;
}
if (arm_check_mpidr(mpidr) == -1)
return -1;
/* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */
fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL;
if (fvp_pwrc_read_psysr(mpidr) == PSYSR_INVALID)
return -1;
topology_setup_done = 1;
return plat_arm_calc_core_pos(mpidr);
}
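A short caller-side sketch (a fragment for illustration; target_mpidr is assumed to come from the PSCI client and is not defined in this diff):
/* Hypothetical caller: reject an MPIDR that is not present on this FVP
 * before using the linear index. */
int idx = plat_core_pos_by_mpidr(target_mpidr);
if (idx < 0)
	return PSCI_E_INVALID_PARAMS;
/* ... idx can now be used to index per-CPU data ... */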
......@@ -28,7 +28,6 @@
# POSSIBILITY OF SUCH DAMAGE.
#
PLAT_INCLUDES := -Iplat/arm/board/fvp/include
......@@ -63,5 +62,8 @@ BL31_SOURCES += lib/cpus/aarch64/aem_generic.S \
plat/arm/board/fvp/aarch64/fvp_helpers.S \
plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
include plat/arm/board/common/board_common.mk
include plat/arm/common/arm_common.mk
......@@ -29,6 +29,8 @@
#
# TSP source files specific to FVP platform
BL32_SOURCES += plat/arm/board/fvp/tsp/fvp_tsp_setup.c
BL32_SOURCES += plat/arm/board/fvp/fvp_topology.c \
plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c \
plat/arm/board/fvp/tsp/fvp_tsp_setup.c
include plat/arm/common/tsp/arm_tsp.mk
......@@ -48,6 +48,9 @@ ERRATA_A57_813420 := 1
# power down sequence
SKIP_A57_L1_FLUSH_PWR_DWN := 1
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
include plat/arm/board/common/board_css.mk
include plat/arm/common/arm_common.mk
include plat/arm/soc/common/soc_css.mk
......@@ -28,4 +28,6 @@
# POSSIBILITY OF SUCH DAMAGE.
#
BL32_SOURCES += plat/arm/css/common/css_topology.c
include plat/arm/common/tsp/arm_tsp.mk
......@@ -30,10 +30,37 @@
#include <asm_macros.S>
#include <platform_def.h>
.weak plat_arm_calc_core_pos
.weak plat_my_core_pos
.globl plat_crash_console_init
.globl plat_crash_console_putc
.globl platform_mem_init
/* -----------------------------------------------------
* unsigned int plat_my_core_pos(void)
* This function uses the plat_arm_calc_core_pos()
* definition to get the index of the calling CPU.
* -----------------------------------------------------
*/
func plat_my_core_pos
mrs x0, mpidr_el1
b plat_arm_calc_core_pos
endfunc plat_my_core_pos
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* Helper function to calculate the core position.
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func plat_arm_calc_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
endfunc plat_arm_calc_core_pos
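A C sketch of the same calculation, for readability (assumes MPIDR_CPU_MASK == 0xff and MPIDR_CLUSTER_MASK == 0xff00, i.e. Aff0 is the core id and Aff1 the cluster id; the function name is illustrative only):
unsigned int example_calc_core_pos(unsigned long mpidr)
{
	unsigned long core_id = mpidr & 0xff;		/* Aff0 */
	unsigned long cluster_id = (mpidr >> 8) & 0xff;	/* Aff1 */

	/* (ClusterId * 4) + CoreId, e.g. MPIDR 0x101 -> position 5 */
	return (cluster_id * 4) + core_id;
}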
/* ---------------------------------------------
* int plat_crash_console_init(void)
......@@ -60,3 +87,12 @@ func plat_crash_console_putc
mov_imm x1, PLAT_ARM_CRASH_UART_BASE
b console_core_putc
endfunc plat_crash_console_putc
/* ---------------------------------------------------------------------
* We don't need to carry out any memory initialization on ARM
* platforms. The Secure RAM is accessible straight away.
* ---------------------------------------------------------------------
*/
func platform_mem_init
ret
endfunc platform_mem_init
......@@ -226,9 +226,6 @@ void arm_bl31_platform_setup(void)
/* Initialize power controller before setting up topology */
plat_arm_pwrc_setup();
/* Topologies are best known to the platform. */
plat_arm_topology_setup();
}
void bl31_platform_setup(void)
......@@ -46,6 +46,23 @@ endif
# Process flags
$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
# For the original power-state parameter format, the State-ID can be encoded
# according to the recommended encoding or set to zero. This flag determines
# which State-ID encoding is to be parsed.
ARM_RECOM_STATE_ID_ENC := 0
# If PSCI_EXTENDED_STATE_ID is set, then the recommended State-ID encoding
# needs to be used. Otherwise, throw a build error.
ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
ifeq (${ARM_RECOM_STATE_ID_ENC}, 0)
$(error "Incompatible STATE_ID build option specified")
endif
endif
# Process ARM_RECOM_STATE_ID_ENC flag
$(eval $(call assert_boolean,ARM_RECOM_STATE_ID_ENC))
$(eval $(call add_define,ARM_RECOM_STATE_ID_ENC))
PLAT_INCLUDES += -Iinclude/common/tbbr \
-Iinclude/plat/arm/common \
-Iinclude/plat/arm/common/aarch64
......@@ -83,7 +100,8 @@ BL31_SOURCES += drivers/arm/cci/cci.c \
plat/arm/common/arm_security.c \
plat/arm/common/arm_topology.c \
plat/common/plat_gic.c \
plat/common/aarch64/platform_mp_stack.S
plat/common/aarch64/platform_mp_stack.S \
plat/common/aarch64/plat_psci_common.c
ifneq (${TRUSTED_BOARD_BOOT},0)
......@@ -29,60 +29,118 @@
*/
#include <arch_helpers.h>
#include <arm_def.h>
#include <assert.h>
#include <errno.h>
#include <plat_arm.h>
#include <psci.h>
#if ARM_RECOM_STATE_ID_ENC
extern unsigned int arm_pm_idle_states[];
#endif /* __ARM_RECOM_STATE_ID_ENC__ */
#if !ARM_RECOM_STATE_ID_ENC
/*******************************************************************************
* ARM standard platform utility function which is used to determine if any
* platform actions should be performed for the specified affinity instance
* given its state. Nothing needs to be done if the 'state' is not off or if
* this is not the highest affinity level which will enter the 'state'.
* ARM standard platform handler called to check the validity of the power state
* parameter.
******************************************************************************/
int32_t arm_do_affinst_actions(unsigned int afflvl, unsigned int state)
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
unsigned int max_phys_off_afflvl;
int pstate = psci_get_pstate_type(power_state);
int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
int i;
assert(afflvl <= MPIDR_AFFLVL1);
assert(req_state);
if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
if (state != PSCI_STATE_OFF)
return -EAGAIN;
/* Sanity check the requested state */
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on power level 0
* Ignore any other power level.
*/
if (pwr_lvl != ARM_PWR_LVL0)
return PSCI_E_INVALID_PARAMS;
req_state->pwr_domain_state[ARM_PWR_LVL0] =
ARM_LOCAL_STATE_RET;
} else {
for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
req_state->pwr_domain_state[i] =
ARM_LOCAL_STATE_OFF;
}
/*
* Find the highest affinity level which will be suspended and postpone
* all the platform specific actions until that level is hit.
* We expect the 'state id' to be zero.
*/
max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
if (afflvl != max_phys_off_afflvl)
return -EAGAIN;
if (psci_get_pstate_id(power_state))
return PSCI_E_INVALID_PARAMS;
return 0;
return PSCI_E_SUCCESS;
}
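A concrete illustration of what this variant accepts (shift values assumed from psci.h, as noted earlier; not part of the change itself):
/*
 * With the zero State-ID format (PSTATE_TYPE_SHIFT = 16, PSTATE_PWR_LVL_SHIFT
 * = 24 assumed):
 *   0x00000000 - CPU standby at power level 0 (type = standby, id = 0)
 *   0x01010000 - power down up to level 1     (type = powerdown, level = 1)
 * Any non-zero State-ID, or a standby request above level 0, is rejected
 * with PSCI_E_INVALID_PARAMS.
 */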
#else
/*******************************************************************************
* ARM standard platform handler called to check the validity of the power state
* parameter.
* ARM standard platform handler called to check the validity of the power
* state parameter. The power state parameter has to be a composite power
* state.
******************************************************************************/
int arm_validate_power_state(unsigned int power_state)
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
/* Sanity check the requested state */
if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on affinity level 0
* (i.e. a CPU on ARM standard platforms).
* Ignore any other affinity level.
*/
if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
}
unsigned int state_id;
int i;
assert(req_state);
/*
* We expect the 'state id' to be zero.
* Currently we are using a linear search for finding the matching
* entry in the idle power state array. This can be made a binary
* search if the number of entries justifies the additional complexity.
*/
if (psci_get_pstate_id(power_state))
for (i = 0; !!arm_pm_idle_states[i]; i++) {
if (power_state == arm_pm_idle_states[i])
break;
}
/* Return error if entry not found in the idle state array */
if (!arm_pm_idle_states[i])
return PSCI_E_INVALID_PARAMS;
i = 0;
state_id = psci_get_pstate_id(power_state);
/* Parse the State ID and populate the state info parameter */
while (state_id) {
req_state->pwr_domain_state[i++] = state_id &
ARM_LOCAL_PSTATE_MASK;
state_id >>= ARM_LOCAL_PSTATE_WIDTH;
}
return PSCI_E_SUCCESS;
}
#endif /* __ARM_RECOM_STATE_ID_ENC__ */
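A worked example of the parsing loop above (an illustration of the existing code, not new behaviour): for the "cluster + CPU power down" idle state the State-ID is 0x22, so the loop runs twice.
/*
 *   iteration 1: req_state->pwr_domain_state[0] = 0x22 & 0xf = 2 (CPU OFF)
 *   iteration 2: req_state->pwr_domain_state[1] = 0x02 & 0xf = 2 (cluster OFF)
 *
 * Each 4-bit nibble of the State-ID becomes the local state of one power
 * level, starting from level 0.
 */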
/*******************************************************************************
* ARM standard platform handler called to check the validity of the non secure
* entrypoint.
******************************************************************************/
int arm_validate_ns_entrypoint(uintptr_t entrypoint)
{
/*
* Check if the non secure entrypoint lies within the non
* secure DRAM.
*/
if ((entrypoint >= ARM_NS_DRAM1_BASE) && (entrypoint <
(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE)))
return PSCI_E_SUCCESS;
if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint <
(ARM_DRAM2_BASE + ARM_DRAM2_SIZE)))
return PSCI_E_SUCCESS;
return PSCI_E_INVALID_ADDRESS;
}