Commit 12d0d00d authored by Soby Mathew's avatar Soby Mathew Committed by Achin Gupta
Browse files

PSCI: Introduce new platform and CM helper APIs

This patch introduces new platform APIs and context management helper APIs
to support the new topology framework based on linear core position. This
framework will be introduced in the following patch and it removes the
assumption that the MPIDR based affinity levels map directly to levels
in a power domain tree. The new platforms APIs and context management
helpers based on core position are as described below:

* plat_my_core_pos() and plat_core_pos_by_mpidr()

These 2 new mandatory platform APIs are meant to replace the existing
'platform_get_core_pos()' API. The 'plat_my_core_pos()' API returns the
linear index of the calling core and 'plat_core_pos_by_mpidr()' returns
the linear index of a core specified by its MPIDR. The latter API will also
validate the MPIDR passed as an argument and will return an error code (-1)
if an invalid MPIDR is passed as the argument. This enables the caller to
safely convert an MPIDR of another core to its linear index with...
parent 4067dc31
Showing with 182 additions and 63 deletions
+182 -63
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -64,6 +64,32 @@ void cm_init(void)
*/
}
/*******************************************************************************
 * Return the most recently registered 'cpu_context' pointer for the CPU whose
 * linear index is `cpu_idx` in the given security state. NULL is returned if
 * no context has been registered for that CPU/state yet.
 ******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
			      unsigned int security_state)
{
	void *ctx;

	assert(sec_state_is_valid(security_state));

	ctx = get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);

	return ctx;
}
/*******************************************************************************
 * Register `context` as the current 'cpu_context' pointer for the CPU whose
 * linear index is `cpu_idx`, in the given security state.
 ******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx,
			     void *context,
			     unsigned int security_state)
{
	assert(sec_state_is_valid(security_state));

	set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
......@@ -73,7 +99,7 @@ void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]);
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
}
/*******************************************************************************
......@@ -84,7 +110,8 @@ void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_st
{
assert(sec_state_is_valid(security_state));
set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
}
/*******************************************************************************
......@@ -114,7 +141,7 @@ static inline void cm_set_next_context(void *context)
}
/*******************************************************************************
* The following function initializes a cpu_context for the current CPU for
* The following function initializes the cpu_context 'ctx' for
* first use, and sets the initial entrypoint state as specified by the
* entry_point_info structure.
*
......@@ -123,25 +150,24 @@ static inline void cm_set_next_context(void *context)
* context and sets this as the next context to return to.
*
* The EE and ST attributes are used to configure the endianness and secure
* timer availability for the new excution context.
* timer availability for the new execution context.
*
* To prepare the register state for entry call cm_prepare_el3_exit() and
* el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
* cm_e1_sysreg_context_restore().
******************************************************************************/
void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
uint32_t security_state;
cpu_context_t *ctx;
unsigned int security_state;
uint32_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
unsigned long sctlr_elx;
security_state = GET_SECURITY_STATE(ep->h.attr);
ctx = cm_get_context_by_mpidr(mpidr, security_state);
assert(ctx);
security_state = GET_SECURITY_STATE(ep->h.attr);
/* Clear any residual register values from the context */
memset(ctx, 0, sizeof(*ctx));
......@@ -209,6 +235,45 @@ void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure. The context used is the one
 * registered for the security state encoded in ep->h.attr.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));

	/*
	 * A context must have been registered for this CPU/state before it
	 * can be initialized (the replaced cm_init_context asserted this too).
	 */
	assert(ctx);

	cm_init_context_common(ctx, ep);
}
/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure. The context used is the one registered for the
 * security state encoded in ep->h.attr.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));

	/*
	 * A context must have been registered for this CPU/state before it
	 * can be initialized (the replaced cm_init_context asserted this too).
	 */
	assert(ctx);

	cm_init_context_common(ctx, ep);
}
/*******************************************************************************
 * Compatibility wrapper for SPDs still using the older cm library interface.
 * Initializes, for first use, the cpu_context of the CPU identified by
 * `mpidr`, dispatching to the fast current-CPU path when `mpidr` denotes the
 * calling core.
 ******************************************************************************/
void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
{
	unsigned long my_aff = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	if ((mpidr & MPIDR_AFFINITY_MASK) != my_aff)
		cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
	else
		cm_init_my_context(ep);
}
/*******************************************************************************
* Prepare the CPU system registers for first entry into secure or normal world
*
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -48,8 +48,16 @@ static inline void *cm_get_context(uint32_t security_state);
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state);
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state);
void cm_set_context_by_index(unsigned int cpu_idx,
void *context,
unsigned int security_state);
static inline void cm_set_context(void *context, uint32_t security_state);
void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep);
void cm_init_my_context(const struct entry_point_info *ep);
void cm_init_context_by_index(unsigned int cpu_idx,
const struct entry_point_info *ep);
void cm_prepare_el3_exit(uint32_t security_state);
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
......
......@@ -130,6 +130,20 @@
madd x0, x0, x1, x2
.endm
/*
 * This macro calculates the base address of the current CPU's MP stack
 * using the plat_my_core_pos() index, the name of the stack storage
 * and the size of each stack.
 * The computed value is \_name + (index + 1) * \_size, i.e. the highest
 * address of the calling CPU's stack region (stacks grow downwards).
 * Out: X0 = physical address of stack base
 * Clobber: X30, X1, X2
 * NOTE(review): X30 is clobbered by the 'bl', so callers must save their
 * return address first.
 */
.macro get_my_mp_stack _name, _size
bl plat_my_core_pos
ldr x2, =(\_name + \_size)
mov x1, #\_size
madd x0, x0, x1, x2
.endm
/*
* This macro calculates the base address of a UP stack using the
* name of the stack storage and the size of the stack
......
......@@ -59,6 +59,8 @@ int plat_get_image_source(unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
unsigned long plat_get_ns_image_entrypoint(void);
unsigned int plat_my_core_pos(void);
int plat_core_pos_by_mpidr(unsigned long mpidr);
/*******************************************************************************
* Mandatory interrupt management functions
......@@ -74,8 +76,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned int platform_get_core_pos(unsigned long mpidr);
unsigned long platform_get_stack(unsigned long mpidr);
unsigned long plat_get_my_stack(void);
void plat_report_exception(unsigned long);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
......
......@@ -36,7 +36,8 @@
.local platform_normal_stacks
.weak platform_set_stack
.weak platform_get_stack
.weak plat_get_my_stack
.weak plat_set_my_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr)
......@@ -65,6 +66,33 @@ func platform_set_stack
ret x9
endfunc platform_set_stack
/* -----------------------------------------------------
 * unsigned long plat_get_my_stack ()
 *
 * For the current CPU, this function returns the stack
 * base address of its per-cpu stack (the storage is
 * declared below as "Per-cpu stacks in normal memory").
 * Clobbers: x0 - x2, x10, x30
 * -----------------------------------------------------
 */
func plat_get_my_stack
mov x10, x30 // lr
get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret x10
endfunc plat_get_my_stack
/* -----------------------------------------------------
 * void plat_set_my_stack ()
 *
 * For the current CPU, this function sets the stack
 * pointer to a stack allocated in normal memory.
 * The link register is preserved in x9 across the
 * nested 'bl plat_get_my_stack' call.
 * Clobbers: x0 - x2, x9, x10, x30
 * -----------------------------------------------------
 */
func plat_set_my_stack
mov x9, x30 // lr
bl plat_get_my_stack
mov sp, x0
ret x9
endfunc plat_set_my_stack
/* -----------------------------------------------------
* Per-cpu stacks in normal memory. Each cpu gets a
* stack of PLATFORM_STACK_SIZE bytes.
......
......@@ -34,10 +34,13 @@
.local platform_normal_stacks
.globl plat_set_my_stack
.globl plat_get_my_stack
.globl platform_set_stack
.globl platform_get_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
* unsigned long platform_get_stack (unsigned long)
*
* For cold-boot BL images, only the primary CPU needs a
......@@ -45,12 +48,14 @@
* stack allocated in device memory.
* -----------------------------------------------------
*/
func platform_get_stack
func plat_get_my_stack
platform_get_stack:
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret
endfunc platform_get_stack
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
* void platform_set_stack (unsigned long)
*
* For cold-boot BL images, only the primary CPU needs a
......@@ -58,11 +63,12 @@ endfunc platform_get_stack
* allocated in normal memory.
* -----------------------------------------------------
*/
func platform_set_stack
func plat_set_my_stack
platform_set_stack:
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
mov sp, x0
ret
endfunc platform_set_stack
endfunc plat_set_my_stack
/* -----------------------------------------------------
* Single cpu stack in normal memory.
......
......@@ -264,18 +264,14 @@ void psci_release_pwr_domain_locks(int start_pwrlvl,
}
/*******************************************************************************
* Simple routine to determine whether an power domain instance at a given
* level in an mpidr exists or not.
* Simple routine to determine whether a mpidr is valid or not.
******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr, int level)
int psci_validate_mpidr(unsigned long mpidr)
{
pwr_map_node_t *node;
node = psci_get_pwr_map_node(mpidr, level);
if (node && (node->state & PSCI_PWR_DOMAIN_PRESENT))
return PSCI_E_SUCCESS;
else
if (plat_core_pos_by_mpidr(mpidr) < 0)
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
/*******************************************************************************
......
......@@ -82,8 +82,7 @@ do_core_pwr_dwn:
* ---------------------------------------------
*/
do_stack_maintenance:
mrs x0, mpidr_el1
bl platform_get_stack
bl plat_get_my_stack
/* ---------------------------------------------
* Calculate and store the size of the used
......@@ -136,8 +135,7 @@ func psci_do_pwrup_cache_maintenance
* stack base address in x0.
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_get_stack
bl plat_get_my_stack
mov x1, sp
sub x1, x0, x1
mov x0, sp
......
......@@ -50,10 +50,9 @@ int psci_cpu_on(unsigned long target_cpu,
entry_point_info_t ep;
/* Determine if the cpu exists or not */
rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
if (rc != PSCI_E_SUCCESS) {
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
}
/* Validate the entrypoint using platform pm_ops */
if (psci_plat_pm_ops->validate_ns_entrypoint) {
......@@ -287,7 +286,7 @@ int psci_migrate(unsigned long target_cpu)
return PSCI_E_NOT_PRESENT;
/* Check the validity of the specified target cpu */
rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
......
......@@ -147,13 +147,14 @@ int psci_cpu_on_start(unsigned long target_cpu,
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
cm_init_context(target_cpu, ep);
cm_init_context_by_index(target_idx, ep);
else
/* Restore the state on error. */
psci_do_state_coordination(MPIDR_AFFLVL0,
end_pwrlvl,
target_cpu_nodes,
PSCI_STATE_OFF);
exit:
/*
* This loop releases the lock corresponding to each power level
......
......@@ -123,7 +123,7 @@ unsigned short psci_get_state(pwr_map_node_t *node);
unsigned short psci_get_phys_state(pwr_map_node_t *node);
void psci_set_state(pwr_map_node_t *node, unsigned short state);
unsigned long mpidr_set_pwr_domain_inst(unsigned long, unsigned char, int);
int psci_validate_mpidr(unsigned long, int);
int psci_validate_mpidr(unsigned long mpidr);
int get_power_on_target_pwrlvl(void);
void psci_power_up_finish(int end_pwrlvl,
pwrlvl_power_on_finisher_t pon_handler);
......
......@@ -193,36 +193,39 @@ static void psci_init_pwr_map_node(unsigned long mpidr,
state = plat_get_pwr_domain_state(level, mpidr);
psci_pwr_domain_map[idx].state = state;
if (level == MPIDR_AFFLVL0) {
/*
* Check if this is a CPU node and is present in which case certain
* other initialisations are required.
*/
if (level != MPIDR_AFFLVL0)
return;
/*
* Mark the cpu as OFF. Higher power level reference counts
* have already been memset to 0
*/
if (state & PSCI_PWR_DOMAIN_PRESENT)
psci_set_state(&psci_pwr_domain_map[idx],
PSCI_STATE_OFF);
if (!(state & PSCI_PWR_DOMAIN_PRESENT))
return;
/*
* Associate a non-secure context with this power
* instance through the context management library.
*/
linear_id = platform_get_core_pos(mpidr);
assert(linear_id < PLATFORM_CORE_COUNT);
/*
* Mark the cpu as OFF. Higher power level reference counts
* have already been memset to 0
*/
psci_set_state(&psci_pwr_domain_map[idx], PSCI_STATE_OFF);
/* Invalidate the suspend context for the node */
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.power_state,
PSCI_INVALID_DATA);
/*
* Associate a non-secure context with this power
* instance through the context management library.
*/
linear_id = plat_core_pos_by_mpidr(mpidr);
assert(linear_id < PLATFORM_CORE_COUNT);
flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
/* Invalidate the suspend context for the node */
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.power_state,
PSCI_INVALID_DATA);
cm_set_context_by_mpidr(mpidr,
(void *) &psci_ns_context[linear_id],
NON_SECURE);
}
flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
return;
cm_set_context_by_index(linear_id,
(void *) &psci_ns_context[linear_id],
NON_SECURE);
}
/*******************************************************************************
......
......@@ -90,7 +90,7 @@ int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
unsigned int power_state;
power_state = get_cpu_data_by_mpidr(mpidr,
power_state = get_cpu_data_by_index(plat_core_pos_by_mpidr(mpidr),
psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
......@@ -185,7 +185,7 @@ void psci_cpu_suspend_start(entry_point_info_t *ep,
/*
* Store the re-entry information for the non-secure world.
*/
cm_init_context(read_mpidr_el1(), ep);
cm_init_my_context(ep);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_cpu_suspend_finish_entry;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment