Commit 4f6ad66a authored by Achin Gupta, committed by James Morrissey

ARMv8 Trusted Firmware release v0.2

/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <runtime_svc.h>
.globl runtime_exceptions
#include <asm_macros.S>
.section aarch64_code, "ax"; .align 11
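/* -----------------------------------------------------
 * Note: the vector table base is 2KB aligned (.align 11)
 * and each of the 16 entries below occupies 128 bytes,
 * hence the ".align 7" preceding every handler. The four
 * groups cover exceptions taken from the current EL with
 * SP_EL0, the current EL with SP_ELx, a lower EL using
 * AArch64 and a lower EL using AArch32.
 * -----------------------------------------------------
 */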
.align 7
runtime_exceptions:
/* -----------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x180
* -----------------------------------------------------
*/
sync_exception_sp_el0:
exception_entry save_regs
mov x0, #SYNC_EXCEPTION_SP_EL0
mov x1, sp
bl sync_exception_handler
exception_exit restore_regs
eret
.align 7
irq_sp_el0:
exception_entry save_regs
mov x0, #IRQ_SP_EL0
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
fiq_sp_el0:
exception_entry save_regs
mov x0, #FIQ_SP_EL0
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
serror_sp_el0:
exception_entry save_regs
mov x0, #SERROR_SP_EL0
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
/* -----------------------------------------------------
* Current EL with SP_ELx : 0x200 - 0x380
* -----------------------------------------------------
*/
.align 7
sync_exception_sp_elx:
exception_entry save_regs
mov x0, #SYNC_EXCEPTION_SP_ELX
mov x1, sp
bl sync_exception_handler
exception_exit restore_regs
eret
.align 7
irq_sp_elx:
exception_entry save_regs
mov x0, #IRQ_SP_ELX
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
fiq_sp_elx:
exception_entry save_regs
mov x0, #FIQ_SP_ELX
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
serror_sp_elx:
exception_entry save_regs
mov x0, #SERROR_SP_ELX
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
/* -----------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x580
* -----------------------------------------------------
*/
.align 7
sync_exception_aarch64:
exception_entry save_regs
mov x0, #SYNC_EXCEPTION_AARCH64
mov x1, sp
bl sync_exception_handler
exception_exit restore_regs
eret
.align 7
irq_aarch64:
exception_entry save_regs
mov x0, #IRQ_AARCH64
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
fiq_aarch64:
exception_entry save_regs
mov x0, #FIQ_AARCH64
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
serror_aarch64:
exception_entry save_regs
mov x0, #SERROR_AARCH64
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
/* -----------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x780
* -----------------------------------------------------
*/
.align 7
sync_exception_aarch32:
exception_entry save_regs
mov x0, #SYNC_EXCEPTION_AARCH32
mov x1, sp
bl sync_exception_handler
exception_exit restore_regs
eret
.align 7
irq_aarch32:
exception_entry save_regs
mov x0, #IRQ_AARCH32
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
fiq_aarch32:
exception_entry save_regs
mov x0, #FIQ_AARCH32
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
.align 7
serror_aarch32:
exception_entry save_regs
mov x0, #SERROR_AARCH32
mov x1, sp
bl async_exception_handler
exception_exit restore_regs
eret
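/* -----------------------------------------------------
 * save_regs and restore_regs operate on a 0x100 byte
 * stack frame: x0-x28 are saved in pairs at offsets
 * 0x0 - 0xe0, SP_EL0 is stored alongside x28 at 0xe8 and
 * SPSR_EL3 at 0xf0. Each vector passes sp in x1 so the C
 * handlers can inspect this saved context.
 * -----------------------------------------------------
 */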
.align 7
save_regs:; .type save_regs, %function
sub sp, sp, #0x100
stp x0, x1, [sp, #0x0]
stp x2, x3, [sp, #0x10]
stp x4, x5, [sp, #0x20]
stp x6, x7, [sp, #0x30]
stp x8, x9, [sp, #0x40]
stp x10, x11, [sp, #0x50]
stp x12, x13, [sp, #0x60]
stp x14, x15, [sp, #0x70]
stp x16, x17, [sp, #0x80]
stp x18, x19, [sp, #0x90]
stp x20, x21, [sp, #0xa0]
stp x22, x23, [sp, #0xb0]
stp x24, x25, [sp, #0xc0]
stp x26, x27, [sp, #0xd0]
mrs x0, sp_el0
stp x28, x0, [sp, #0xe0]
mrs x0, spsr_el3
str x0, [sp, #0xf0]
ret
restore_regs:; .type restore_regs, %function
ldr x9, [sp, #0xf0]
msr spsr_el3, x9
ldp x28, x9, [sp, #0xe0]
msr sp_el0, x9
ldp x26, x27, [sp, #0xd0]
ldp x24, x25, [sp, #0xc0]
ldp x22, x23, [sp, #0xb0]
ldp x20, x21, [sp, #0xa0]
ldp x18, x19, [sp, #0x90]
ldp x16, x17, [sp, #0x80]
ldp x14, x15, [sp, #0x70]
ldp x12, x13, [sp, #0x60]
ldp x10, x11, [sp, #0x50]
ldp x8, x9, [sp, #0x40]
ldp x6, x7, [sp, #0x30]
ldp x4, x5, [sp, #0x20]
ldp x2, x3, [sp, #0x10]
ldp x0, x1, [sp, #0x0]
add sp, sp, #0x100
ret
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
MEMORY {
/* RAM is read/write and Initialised */
RAM (rwx): ORIGIN = TZRAM_BASE, LENGTH = TZRAM_SIZE
}
SECTIONS
{
. = BL31_BASE;
BL31_RO ALIGN (4096): {
*(entry_code)
*(.text)
*(.rodata)
} >RAM
BL31_STACKS ALIGN (4096): {
. += 0x1000;
*(tzfw_normal_stacks)
} >RAM
BL31_COHERENT_RAM ALIGN (4096): {
*(tzfw_coherent_mem)
/* . += 0x1000;*/
/* Do we need to ensure at least 4k here? */
. = ALIGN(4096);
} >RAM
__BL31_DATA_START__ = .;
.bss ALIGN (4096): {
*(.bss)
*(COMMON)
} >RAM
.data : {
*(.data)
} >RAM
__BL31_DATA_STOP__ = .;
__BL31_RO_BASE__ = LOADADDR(BL31_RO);
__BL31_RO_SIZE__ = SIZEOF(BL31_RO);
__BL31_STACKS_BASE__ = LOADADDR(BL31_STACKS);
__BL31_STACKS_SIZE__ = SIZEOF(BL31_STACKS);
__BL31_COHERENT_RAM_BASE__ = LOADADDR(BL31_COHERENT_RAM);
__BL31_COHERENT_RAM_SIZE__ = SIZEOF(BL31_COHERENT_RAM);
__BL31_RW_BASE__ = __BL31_DATA_START__;
__BL31_RW_SIZE__ = __BL31_DATA_STOP__ - __BL31_DATA_START__;
}
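/*
 * Illustrative sketch (not part of the original sources): the __BL31_*__
 * symbols defined by the linker script above are typically consumed from C
 * by declaring them as externs and taking their addresses, since a linker
 * symbol's value is exposed to C as the symbol's address. The helper name
 * below is hypothetical.
 */
extern unsigned long __BL31_RO_BASE__;
extern unsigned long __BL31_RO_SIZE__;

static unsigned long bl31_ro_limit(void)
{
	/* Base + size of the read-only region, as laid out by the script */
	return (unsigned long) &__BL31_RO_BASE__ +
	       (unsigned long) &__BL31_RO_SIZE__;
}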
#
# Copyright (c) 2013, ARM Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
vpath %.c drivers/arm/interconnect/cci-400/ common/ lib/ \
drivers/arm/peripherals/pl011 plat/fvp common/psci \
lib/semihosting arch/aarch64/ lib/non-semihosting \
lib/sync/locks/bakery/ drivers/power/ arch/system/gic/ \
plat/fvp/aarch64/
vpath %.S lib/arch/aarch64 common/psci \
lib/semihosting/aarch64 include/ plat/fvp/${ARCH} \
lib/sync/locks/exclusive plat/common/aarch64/ \
arch/system/gic/${ARCH}
BL31_ASM_OBJS := bl31_entrypoint.o runtime_exceptions.o psci_entry.o \
spinlock.o gic_v3_sysregs.o fvp_helpers.o
BL31_C_OBJS := bl31_main.o bl31_plat_setup.o bl31_arch_setup.o \
exception_handlers.o bakery_lock.o cci400.o \
fvp_common.o fvp_pm.o fvp_pwrc.o fvp_topology.o \
runtime_svc.o gic_v3.o gic_v2.o psci_setup.o \
psci_common.o psci_afflvl_on.o psci_main.o \
psci_afflvl_off.o psci_afflvl_suspend.o
BL31_ENTRY_POINT := bl31_entrypoint
BL31_MAPFILE := bl31.map
BL31_LINKERFILE := bl31.ld
BL31_OBJS := $(BL31_C_OBJS) $(BL31_ASM_OBJS)
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <semihosting.h>
#include <bl_common.h>
#include <bl31.h>
#include <runtime_svc.h>
void bl31_arch_next_el_setup(void);
/*******************************************************************************
* BL31 is responsible for setting up the runtime services for the primary cpu
* before passing control to the bootloader (UEFI) or Linux.
******************************************************************************/
void bl31_main(void)
{
el_change_info *image_info;
unsigned long mpidr = read_mpidr();
/* Perform remaining generic architectural setup from EL3 */
bl31_arch_setup();
/* Perform platform setup in BL31 */
bl31_platform_setup();
#if defined (__GNUC__)
printf("BL31 Built : %s, %s\n\r", __TIME__, __DATE__);
#endif
/* Initialize the runtime services e.g. psci */
runtime_svc_init(mpidr);
/* Clean caches before re-entering normal world */
dcsw_op_all(DCCSW);
image_info = bl31_get_next_image_info(mpidr);
bl31_arch_next_el_setup();
change_el(image_info);
/* There is no valid reason for change_el() to return */
assert(0);
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <semihosting.h>
#include <bl_common.h>
#include <bl1.h>
/***********************************************************
* Memory for sharing data while changing exception levels.
* Only used by the primary core.
**********************************************************/
unsigned char bl2_el_change_mem_ptr[EL_CHANGE_MEM_SIZE];
unsigned long *get_el_change_mem_ptr(void)
{
return (unsigned long *) bl2_el_change_mem_ptr;
}
unsigned long page_align(unsigned long value, unsigned dir)
{
unsigned long page_size = 1 << FOUR_KB_SHIFT;
/* Round the value up or down to the next page boundary, if not aligned */
if (value & (page_size - 1)) {
value &= ~(page_size - 1);
if (dir == UP)
value += page_size;
}
return value;
}
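/*
 * Example (illustrative): with 4KB pages, page_align(0x1060, UP) returns
 * 0x2000 and page_align(0x1060, DOWN) returns 0x1000. A value that is
 * already page-aligned, e.g. 0x3000, is returned unchanged in either
 * direction.
 */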
static inline unsigned int is_page_aligned (unsigned long addr) {
const unsigned long page_size = 1 << FOUR_KB_SHIFT;
return (addr & (page_size - 1)) == 0;
}
void change_security_state(unsigned int target_security_state)
{
unsigned long scr = read_scr();
if (target_security_state == SECURE)
scr &= ~SCR_NS_BIT;
else if (target_security_state == NON_SECURE)
scr |= SCR_NS_BIT;
else
assert(0);
write_scr(scr);
}
int drop_el(aapcs64_params *args,
unsigned long spsr,
unsigned long entrypoint)
{
write_spsr(spsr);
write_elr(entrypoint);
eret(args->arg0,
args->arg1,
args->arg2,
args->arg3,
args->arg4,
args->arg5,
args->arg6,
args->arg7);
return -EINVAL;
}
long raise_el(aapcs64_params *args)
{
return smc(args->arg0,
args->arg1,
args->arg2,
args->arg3,
args->arg4,
args->arg5,
args->arg6,
args->arg7);
}
/*
* TODO: If we are not EL3 then currently we only issue an SMC.
* Add support for dropping into EL0 etc. Consider adding support
* for switching from S-EL1 to S-EL0/1 etc.
*/
long change_el(el_change_info *info)
{
unsigned long current_el = read_current_el();
if (GET_EL(current_el) == MODE_EL3) {
/*
* We can go anywhere from EL3. So find where.
* TODO: Lots to do if we are going non-secure.
* Flip the NS bit. Restore NS registers etc.
* Just doing the bare minimal for now.
*/
if (info->security_state == NON_SECURE)
change_security_state(info->security_state);
return drop_el(&info->args, info->spsr, info->entrypoint);
} else
return raise_el(&info->args);
}
/* TODO: Add a parameter for DAIF. Not needed right now. */
unsigned long make_spsr(unsigned long target_el,
unsigned long target_sp,
unsigned long target_rw)
{
unsigned long spsr;
/* Disable all exceptions & setup the EL */
spsr = (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
<< PSR_DAIF_SHIFT;
spsr |= PSR_MODE(target_rw, target_el, target_sp);
return spsr;
}
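/*
 * Example (illustrative): an SPSR for entering a 64-bit EL1 image on its
 * own stack with all exceptions masked could be built as
 * make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64). The MODE_* encodings are
 * assumed to come from arch.h and are named here for illustration only.
 */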
/*******************************************************************************
* The next two functions are the weak definitions. Platform specific
* code can override them if it wishes to.
******************************************************************************/
/*******************************************************************************
* Function that takes a memory layout into which BL31 has been either top or
* bottom loaded. Using this information, it populates bl31_mem_layout to tell
* BL31 how much memory it has access to and how much is available for use. It
* does not need the address where BL31 has been loaded as BL31 will reclaim
* all the memory used by BL2.
* TODO: Revisit if this and init_bl2_mem_layout can be replaced by a single
* routine.
******************************************************************************/
void init_bl31_mem_layout(const meminfo *bl2_mem_layout,
meminfo *bl31_mem_layout,
unsigned int load_type)
{
if (load_type == BOT_LOAD) {
/*
* ------------ ^
* | BL2 | |
* |----------| ^ | BL2
* | | | BL2 free | total
* | | | size | size
* |----------| BL2 free base v |
* | BL31 | |
* ------------ BL2 total base v
*/
unsigned long bl31_size;
bl31_mem_layout->free_base = bl2_mem_layout->free_base;
bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
} else {
/*
* ------------ ^
* | BL31 | |
* |----------| ^ | BL2
* | | | BL2 free | total
* | | | size | size
* |----------| BL2 free base v |
* | BL2 | |
* ------------ BL2 total base v
*/
unsigned long bl2_size;
bl31_mem_layout->free_base = bl2_mem_layout->total_base;
bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
}
bl31_mem_layout->total_base = bl2_mem_layout->total_base;
bl31_mem_layout->total_size = bl2_mem_layout->total_size;
bl31_mem_layout->attr = load_type;
flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo));
return;
}
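/*
 * Worked example (illustrative, BOT_LOAD case): with total_base =
 * 0x04000000, total_size = 0x40000 and BL2's free_base = 0x04010000,
 * bl31_size works out to 0x10000, so BL31 is told free_base = 0x04010000
 * and free_size = 0x30000, i.e. everything except the space BL31 itself
 * occupies at the bottom.
 */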
/*******************************************************************************
* Function that takes a memory layout into which BL2 has been either top or
* bottom loaded along with the address where BL2 has been loaded in it. Using
* this information, it populates bl2_mem_layout to tell BL2 how much memory
* it has access to and how much is available for use.
******************************************************************************/
void init_bl2_mem_layout(meminfo *bl1_mem_layout,
meminfo *bl2_mem_layout,
unsigned int load_type,
unsigned long bl2_base)
{
unsigned tmp;
if (load_type == BOT_LOAD) {
bl2_mem_layout->total_base = bl2_base;
tmp = bl1_mem_layout->free_base - bl2_base;
bl2_mem_layout->total_size = bl1_mem_layout->free_size + tmp;
} else {
bl2_mem_layout->total_base = bl1_mem_layout->free_base;
tmp = bl1_mem_layout->total_base + bl1_mem_layout->total_size;
bl2_mem_layout->total_size = tmp - bl1_mem_layout->free_base;
}
bl2_mem_layout->free_base = bl1_mem_layout->free_base;
bl2_mem_layout->free_size = bl1_mem_layout->free_size;
bl2_mem_layout->attr = load_type;
flush_dcache_range((unsigned long) bl2_mem_layout, sizeof(meminfo));
return;
}
static void dump_load_info(unsigned long image_load_addr,
unsigned long image_size,
const meminfo *mem_layout)
{
#if DEBUG
printf("Trying to load image at address 0x%lx, size = 0x%lx\r\n",
image_load_addr, image_size);
printf("Current memory layout:\r\n");
printf(" total region = [0x%lx, 0x%lx]\r\n", mem_layout->total_base,
mem_layout->total_base + mem_layout->total_size);
printf(" free region = [0x%lx, 0x%lx]\r\n", mem_layout->free_base,
mem_layout->free_base + mem_layout->free_size);
#endif
}
/*******************************************************************************
* Generic function to load an image into the trusted RAM using semihosting
* given a name, extents of free memory & whether the image should be loaded at
* the bottom or top of the free memory. It updates the memory layout if the
* load is successful.
******************************************************************************/
unsigned long load_image(meminfo *mem_layout,
const char *image_name,
unsigned int load_type,
unsigned long fixed_addr)
{
unsigned long temp_image_base, image_base;
long offset;
int image_flen;
/* Find the size of the image */
image_flen = semihosting_get_flen(image_name);
if (image_flen < 0) {
printf("ERROR: Cannot access '%s' file (%i).\r\n",
image_name, image_flen);
return 0;
}
/* See if we have enough space */
if (image_flen > mem_layout->free_size) {
printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
image_name);
dump_load_info(0, image_flen, mem_layout);
return 0;
}
switch (load_type) {
case TOP_LOAD:
/* Load the image in the top of free memory */
temp_image_base = mem_layout->free_base + mem_layout->free_size;
temp_image_base -= image_flen;
/* Page align base address and check whether the image still fits */
image_base = page_align(temp_image_base, DOWN);
assert(image_base <= temp_image_base);
if (image_base < mem_layout->free_base) {
printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
image_name);
dump_load_info(image_base, image_flen, mem_layout);
return 0;
}
/* Calculate the amount of extra memory used due to alignment */
offset = temp_image_base - image_base;
break;
case BOT_LOAD:
/* Load the BL2 image in the bottom of free memory */
temp_image_base = mem_layout->free_base;
image_base = page_align(temp_image_base, UP);
assert(image_base >= temp_image_base);
/* Page align base address and check whether the image still fits */
if (image_base + image_flen >
mem_layout->free_base + mem_layout->free_size) {
printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
image_name);
dump_load_info(image_base, image_flen, mem_layout);
return 0;
}
/* Calculate the amount of extra memory used due to alignment */
offset = image_base - temp_image_base;
break;
default:
assert(0);
}
/*
* Some images must be loaded at a fixed address, not a dynamic one.
*
* This has been implemented as a hack on top of the existing dynamic
* loading mechanism, for the time being. If the 'fixed_addr' function
* argument is different from zero, then it will force the load address.
* So we still have this principle of top/bottom loading but the code
* determining the load address is bypassed and the load address is
* forced to the fixed one.
*
* This can result in quite a lot of wasted space because we still use
* 1 sole meminfo structure to represent the extents of free memory,
* where we should use some sort of linked list.
*
* E.g. we want to load BL2 at address 0x04020000, the resulting memory
* layout should look as follows:
* ------------ 0x04040000
* | | <- Free space (1)
* |----------|
* | BL2 |
* |----------| 0x04020000
* | | <- Free space (2)
* |----------|
* | BL1 |
* ------------ 0x04000000
*
* But in the current hacky implementation, we'll need to specify
* whether BL2 is loaded at the top or bottom of the free memory.
* E.g. if BL2 is considered as top-loaded, the meminfo structure
* will give the following view of the memory, hiding the chunk of
* free memory above BL2:
* ------------ 0x04040000
* | |
* | |
* | BL2 |
* |----------| 0x04020000
* | | <- Free space (2)
* |----------|
* | BL1 |
* ------------ 0x04000000
*/
if (fixed_addr != 0) {
/* Load the image at the given address. */
image_base = fixed_addr;
/* Check whether the image fits. */
if ((image_base < mem_layout->free_base) ||
(image_base + image_flen >
mem_layout->free_base + mem_layout->free_size)) {
printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
image_name);
dump_load_info(image_base, image_flen, mem_layout);
return 0;
}
/* Check whether the fixed load address is page-aligned. */
if (!is_page_aligned(image_base)) {
printf("ERROR: Cannot load '%s' file at unaligned address 0x%lx.\r\n",
image_name, fixed_addr);
return 0;
}
/*
* Calculate the amount of extra memory used due to fixed
* loading.
*/
if (load_type == TOP_LOAD) {
unsigned long max_addr, space_used;
/*
* ------------ max_addr
* | /wasted/ | | offset
* |..........|..............................
* | image | | image_flen
* |----------| fixed_addr
* | |
* | |
* ------------ total_base
*/
max_addr = mem_layout->total_base + mem_layout->total_size;
/*
* Compute the amount of memory used by the image.
* Corresponds to all space above the image load
* address.
*/
space_used = max_addr - fixed_addr;
/*
* Calculate the amount of wasted memory within the
* amount of memory used by the image.
*/
offset = space_used - image_flen;
} else /* BOT_LOAD */
/*
* ------------
* | |
* | |
* |----------|
* | image |
* |..........| fixed_addr
* | /wasted/ | | offset
* ------------ total_base
*/
offset = fixed_addr - mem_layout->total_base;
}
/* We have enough space so load the image now */
image_flen = semihosting_download_file(image_name,
image_flen,
(void *) image_base);
if (image_flen <= 0) {
printf("ERROR: Failed to load '%s' file from semihosting (%i).\r\n",
image_name, image_flen);
return 0;
}
/*
* File has been successfully loaded. Update the free memory
* data structure & flush the contents of the TZRAM so that
* the next EL can see it.
*/
/* Flush the image contents to main memory */
flush_dcache_range(image_base, image_flen);
mem_layout->free_size -= image_flen + offset;
/* Update the base of free memory since it has moved up */
if (load_type == BOT_LOAD)
mem_layout->free_base += offset + image_flen;
return image_base;
}
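/*
 * Illustrative call (the image name and layout variable are examples, not
 * taken from the original sources): top-load an image into the free memory
 * described by a meminfo, letting load_image pick and page-align the base:
 *
 *	unsigned long base;
 *
 *	base = load_image(&bl1_mem_layout, "bl2.bin", TOP_LOAD, 0);
 *	if (base == 0)
 *		... handle the load failure ...
 *
 * Passing a non-zero fixed_addr instead forces that load address, subject
 * to the fit and alignment checks above.
 */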
/*******************************************************************************
* Run a loaded image from the given entry point. This could result in either
* dropping into a lower exception level or jumping to a higher exception level.
* The only way of doing the latter is through an SMC. In either case, setup the
* parameters for the EL change request correctly.
******************************************************************************/
int run_image(unsigned long entrypoint,
unsigned long spsr,
unsigned long target_security_state,
meminfo *mem_layout,
void *data)
{
el_change_info run_image_info;
unsigned long current_el = read_current_el();
/* Tell next EL what we want done */
run_image_info.args.arg0 = RUN_IMAGE;
run_image_info.entrypoint = entrypoint;
run_image_info.spsr = spsr;
run_image_info.security_state = target_security_state;
run_image_info.next = 0;
/*
* If we are EL3 then only an eret can take us to the desired
* exception level. Else for the time being assume that we have
* to jump to a higher EL and issue an SMC. Contents of argY
* will go into the general purpose register xY e.g. arg0->x0
*/
if (GET_EL(current_el) == MODE_EL3) {
run_image_info.args.arg1 = (unsigned long) mem_layout;
run_image_info.args.arg2 = (unsigned long) data;
} else {
run_image_info.args.arg1 = entrypoint;
run_image_info.args.arg2 = spsr;
run_image_info.args.arg3 = (unsigned long) mem_layout;
run_image_info.args.arg4 = (unsigned long) data;
}
return change_el(&run_image_info);
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
typedef int (*afflvl_off_handler)(unsigned long, aff_map_node *);
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is turned off.
******************************************************************************/
static int psci_afflvl0_off(unsigned long mpidr, aff_map_node *cpu_node)
{
unsigned int index, plat_state;
int rc = PSCI_E_SUCCESS;
unsigned long sctlr = read_sctlr();
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Generic management: Get the index for clearing any
* lingering re-entry information
*/
index = cpu_node->data;
memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*
* TODO: This power down sequence varies across cpus so it needs to be
* abstracted out on the basis of the MIDR like in cpu_reset_handler().
* Do the bare minimal for the time being. Fix this before porting to
* Cortex models.
*/
sctlr &= ~SCTLR_C_BIT;
write_sctlr(sctlr);
/*
* CAUTION: This flush to the level of unification makes an assumption
* about the cache hierarchy at affinity level 0 (cpu) in the platform.
* Ideally the platform should tell psci which levels to flush to exit
* coherency.
*/
dcsw_op_louis(DCCISW);
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
if (psci_plat_pm_ops->affinst_off) {
/* Get the current physical state of this cpu */
plat_state = psci_get_aff_phys_state(cpu_node);
rc = psci_plat_pm_ops->affinst_off(mpidr,
cpu_node->level,
plat_state);
}
/*
* The only error cpu_off can return is PSCI_E_DENIED. So check if that's
* indeed the case. The caller will simply 'eret' in case of an error.
*/
if (rc != PSCI_E_SUCCESS)
assert(rc == PSCI_E_DENIED);
return rc;
}
static int psci_afflvl1_off(unsigned long mpidr, aff_map_node *cluster_node)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Keep the physical state of this cluster handy to decide
* what action needs to be taken
*/
plat_state = psci_get_aff_phys_state(cluster_node);
/*
* Arch. Management. Flush all levels of caches to PoC if
* the cluster is to be shutdown
*/
if (plat_state == PSCI_STATE_OFF)
dcsw_op_all(DCCISW);
/*
* Plat. Management. Allow the platform to do its cluster
* specific bookkeeping e.g. turn off interconnect coherency,
* program the power controller etc.
*/
if (psci_plat_pm_ops->affinst_off)
rc = psci_plat_pm_ops->affinst_off(mpidr,
cluster_node->level,
plat_state);
return rc;
}
static int psci_afflvl2_off(unsigned long mpidr, aff_map_node *system_node)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
/* Cannot go beyond this level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
plat_state = psci_get_aff_phys_state(system_node);
/* No arch. or generic bookkeeping to do here currently */
/*
* Plat. Management : Allow the platform to do its bookkeeping
* at this affinity level
*/
if (psci_plat_pm_ops->affinst_off)
rc = psci_plat_pm_ops->affinst_off(mpidr,
system_node->level,
plat_state);
return rc;
}
static const afflvl_off_handler psci_afflvl_off_handlers[] = {
psci_afflvl0_off,
psci_afflvl1_off,
psci_afflvl2_off,
};
/*******************************************************************************
* This function implements the core of the processing required to turn a cpu
* off. It's assumed that along with turning the cpu off, higher affinity levels
* will be turned off as far as possible. We first need to determine the new
state of all the affinity instances in the mpidr corresponding to the target
* cpu. Action will be taken on the basis of this new state. To do the state
* change we first need to acquire the locks for all the implemented affinity
levels to be able to snapshot the system state. Then we need to start turning
* affinity levels off from the lowest to the highest (e.g. a cpu needs to be
* off before a cluster can be turned off). To achieve this flow, we start
* acquiring the locks from the highest to the lowest affinity level. Once we
* reach affinity level 0, we do the state change followed by the actions
* corresponding to the new state for affinity level 0. Actions as per the
* updated state for higher affinity levels are performed as we unwind back to
the highest affinity level.
******************************************************************************/
int psci_afflvl_off(unsigned long mpidr,
int cur_afflvl,
int tgt_afflvl)
{
int rc = PSCI_E_SUCCESS, level;
unsigned int next_state, prev_state;
aff_map_node *aff_node;
mpidr &= MPIDR_AFFINITY_MASK;
/*
* Some affinity instances at levels between the current and
* target levels could be absent in the mpidr. Skip them and
* start from the first present instance.
*/
level = psci_get_first_present_afflvl(mpidr,
cur_afflvl,
tgt_afflvl,
&aff_node);
/*
* Return if there are no more affinity instances beyond this
* level to process. Else ensure that the returned affinity
* node makes sense.
*/
if (aff_node == NULL)
return rc;
assert(level == aff_node->level);
/*
* This function acquires the lock corresponding to each
* affinity level so that state management can be done safely.
*/
bakery_lock_get(mpidr, &aff_node->lock);
/* Keep the old state and the next one handy */
prev_state = psci_get_state(aff_node->state);
next_state = PSCI_STATE_OFF;
/*
* We start from the highest affinity level and work our way
* downwards to the lowest i.e. MPIDR_AFFLVL0.
*/
if (aff_node->level == tgt_afflvl) {
psci_change_state(mpidr,
tgt_afflvl,
get_max_afflvl(),
next_state);
} else {
rc = psci_afflvl_off(mpidr, level - 1, tgt_afflvl);
if (rc != PSCI_E_SUCCESS) {
psci_set_state(aff_node->state, prev_state);
goto exit;
}
}
/*
* Perform generic, architecture and platform specific
* handling
*/
rc = psci_afflvl_off_handlers[level](mpidr, aff_node);
if (rc != PSCI_E_SUCCESS) {
psci_set_state(aff_node->state, prev_state);
goto exit;
}
/*
* If all has gone as per plan then this cpu should be
* marked as OFF
*/
if (level == MPIDR_AFFLVL0) {
next_state = psci_get_state(aff_node->state);
assert(next_state == PSCI_STATE_OFF);
}
exit:
bakery_lock_release(mpidr, &aff_node->lock);
return rc;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
typedef int (*afflvl_on_handler)(unsigned long,
aff_map_node *,
unsigned long,
unsigned long);
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
******************************************************************************/
static int cpu_on_validate_state(unsigned int state)
{
unsigned int psci_state;
/* Get the raw psci state */
psci_state = psci_get_state(state);
if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
return PSCI_E_ALREADY_ON;
if (psci_state == PSCI_STATE_ON_PENDING)
return PSCI_E_ON_PENDING;
assert(psci_state == PSCI_STATE_OFF);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Handler routine to turn a cpu on. It takes care of any generic, architectural
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
aff_map_node *cpu_node,
unsigned long ns_entrypoint,
unsigned long context_id)
{
unsigned int index, plat_state;
unsigned long psci_entrypoint;
int rc;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Generic management: Ensure that the cpu is off to be
* turned on
*/
rc = cpu_on_validate_state(cpu_node->state);
if (rc != PSCI_E_SUCCESS)
return rc;
/*
* Arch. management: Derive the re-entry information for
* the non-secure world from the non-secure state from
* where this call originated.
*/
index = cpu_node->data;
rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
if (psci_plat_pm_ops->affinst_on) {
/* Get the current physical state of this cpu */
plat_state = psci_get_aff_phys_state(cpu_node);
rc = psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
ns_entrypoint,
cpu_node->level,
plat_state);
}
return rc;
}
/*******************************************************************************
* Handler routine to turn a cluster on. It takes care of any generic, arch.
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
aff_map_node *cluster_node,
unsigned long ns_entrypoint,
unsigned long context_id)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
unsigned long psci_entrypoint;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* There is no generic and arch. specific cluster
* management required
*/
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
if (psci_plat_pm_ops->affinst_on) {
plat_state = psci_get_aff_phys_state(cluster_node);
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
rc = psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
ns_entrypoint,
cluster_node->level,
plat_state);
}
return rc;
}
/*******************************************************************************
* Handler routine to turn a cluster of clusters on. It takes care of any
* generic, arch. or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
aff_map_node *system_node,
unsigned long ns_entrypoint,
unsigned long context_id)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Cannot go beyond affinity level 2 in this psci imp. */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* There is no generic and arch. specific system management
* required
*/
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
if (psci_plat_pm_ops->affinst_on) {
plat_state = psci_get_aff_phys_state(system_node);
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
rc = psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
ns_entrypoint,
system_node->level,
plat_state);
}
return rc;
}
/* Private data structure to make these handlers accessible through indexing */
static const afflvl_on_handler psci_afflvl_on_handlers[] = {
psci_afflvl0_on,
psci_afflvl1_on,
psci_afflvl2_on,
};
/*******************************************************************************
* This function implements the core of the processing required to turn a cpu
* on. It avoids recursion to traverse from the lowest to the highest affinity
* level unlike the off/suspend/pon_finisher functions. It does ensure that the
locks are picked in the same order as those routines to avoid deadlocks.
* The flow is: Take all the locks until the highest affinity level, Call the
* handlers for turning an affinity level on & finally change the state of the
* affinity level.
******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
unsigned long entrypoint,
unsigned long context_id,
int current_afflvl,
int target_afflvl)
{
unsigned int prev_state, next_state;
int rc = PSCI_E_SUCCESS, level;
aff_map_node *aff_node;
unsigned long mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
/*
* This loop acquires the lock corresponding to each
* affinity level so that by the time we hit the lowest
* affinity level, the system topology is snapshot and
* state management can be done safely.
*/
for (level = current_afflvl; level >= target_afflvl; level--) {
aff_node = psci_get_aff_map_node(target_cpu, level);
if (aff_node)
bakery_lock_get(mpidr, &aff_node->lock);
}
/*
* Perform generic, architecture and platform specific
* handling
*/
for (level = current_afflvl; level >= target_afflvl; level--) {
/* Grab the node for each affinity level once again */
aff_node = psci_get_aff_map_node(target_cpu, level);
if (aff_node) {
/* Keep the old state and the next one handy */
prev_state = psci_get_state(aff_node->state);
rc = psci_afflvl_on_handlers[level](target_cpu,
aff_node,
entrypoint,
context_id);
if (rc != PSCI_E_SUCCESS) {
psci_set_state(aff_node->state, prev_state);
goto exit;
}
}
}
/*
* State management: Update the states since this is the
* target affinity level requested.
*/
psci_change_state(target_cpu,
target_afflvl,
get_max_afflvl(),
PSCI_STATE_ON_PENDING);
exit:
/*
* This loop releases the lock corresponding to each affinity level
* in the reverse order. It also checks the final state of the cpu.
*/
for (level = target_afflvl; level <= current_afflvl; level++) {
aff_node = psci_get_aff_map_node(target_cpu, level);
if (aff_node) {
if (level == MPIDR_AFFLVL0) {
next_state = psci_get_state(aff_node->state);
assert(next_state == PSCI_STATE_ON_PENDING);
}
bakery_lock_release(mpidr, &aff_node->lock);
}
}
return rc;
}
/*******************************************************************************
* The following functions finish an earlier affinity power on request. They
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
aff_map_node *cpu_node,
unsigned int prev_state)
{
unsigned int index, plat_state, rc = PSCI_E_SUCCESS;
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Plat. management: Perform the platform specific actions
* for this cpu e.g. enabling the gic or zeroing the mailbox
* register. The actual state of this cpu has already been
* changed.
*/
if (psci_plat_pm_ops->affinst_on_finish) {
/* Get the previous physical state of this cpu */
plat_state = psci_get_phys_state(prev_state);
rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
cpu_node->level,
plat_state);
assert(rc == PSCI_E_SUCCESS);
}
/*
* Arch. management: Turn on mmu & restore architectural state
*/
write_vbar((unsigned long) runtime_exceptions);
enable_mmu();
/*
* All the platform specific actions for turning this cpu
* on have completed. Perform enough arch. initialization
* to run in the non-secure address space.
*/
bl31_arch_setup();
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the cpu_on
* call to set this cpu on its way. First get the index
* for restoring the re-entry info
*/
index = cpu_node->data;
rc = psci_get_ns_entry_info(index);
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
return rc;
}
static unsigned int psci_afflvl1_on_finish(unsigned long mpidr,
aff_map_node *cluster_node,
unsigned int prev_state)
{
unsigned int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
if (psci_plat_pm_ops->affinst_on_finish) {
plat_state = psci_get_phys_state(prev_state);
rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
cluster_node->level,
plat_state);
assert(rc == PSCI_E_SUCCESS);
}
return rc;
}
static unsigned int psci_afflvl2_on_finish(unsigned long mpidr,
aff_map_node *system_node,
unsigned int prev_state)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
if (psci_plat_pm_ops->affinst_on_finish) {
plat_state = psci_get_phys_state(prev_state);
rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
system_node->level,
plat_state);
assert(rc == PSCI_E_SUCCESS);
}
return rc;
}
const afflvl_power_on_finisher psci_afflvl_on_finishers[] = {
psci_afflvl0_on_finish,
psci_afflvl1_on_finish,
psci_afflvl2_on_finish,
};
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
typedef int (*afflvl_suspend_handler)(unsigned long,
aff_map_node *,
unsigned long,
unsigned long,
unsigned int);
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is about to be suspended.
******************************************************************************/
static int psci_afflvl0_suspend(unsigned long mpidr,
aff_map_node *cpu_node,
unsigned long ns_entrypoint,
unsigned long context_id,
unsigned int power_state)
{
unsigned int index, plat_state;
unsigned long psci_entrypoint, sctlr = read_sctlr();
int rc = PSCI_E_SUCCESS;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Generic management: Store the re-entry information for the
* non-secure world
*/
index = cpu_node->data;
rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/*
* Arch. management: Save the secure context, flush the
* L1 caches and exit intra-cluster coherency et al
*/
psci_secure_context[index].sctlr = read_sctlr();
psci_secure_context[index].scr = read_scr();
psci_secure_context[index].cptr = read_cptr();
psci_secure_context[index].cpacr = read_cpacr();
psci_secure_context[index].cntfrq = read_cntfrq_el0();
psci_secure_context[index].mair = read_mair();
psci_secure_context[index].tcr = read_tcr();
psci_secure_context[index].ttbr = read_ttbr0();
psci_secure_context[index].vbar = read_vbar();
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*
* TODO: This power down sequence varies across cpus so it needs to be
* abstracted out on the basis of the MIDR like in cpu_reset_handler().
* Do the bare minimal for the time being. Fix this before porting to
* Cortex models.
*/
sctlr &= ~SCTLR_C_BIT;
write_sctlr(sctlr);
/*
* CAUTION: This flush to the level of unification makes an assumption
* about the cache hierarchy at affinity level 0 (cpu) in the platform.
* Ideally the platform should tell psci which levels to flush to exit
* coherency.
*/
dcsw_op_louis(DCCISW);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
if (psci_plat_pm_ops->affinst_suspend) {
plat_state = psci_get_aff_phys_state(cpu_node);
rc = psci_plat_pm_ops->affinst_suspend(mpidr,
psci_entrypoint,
ns_entrypoint,
cpu_node->level,
plat_state);
}
return rc;
}
static int psci_afflvl1_suspend(unsigned long mpidr,
aff_map_node *cluster_node,
unsigned long ns_entrypoint,
unsigned long context_id,
unsigned int power_state)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Keep the physical state of this cluster handy to decide
* what action needs to be taken
*/
plat_state = psci_get_aff_phys_state(cluster_node);
/*
* Arch. management: Flush all levels of caches to PoC if the
* cluster is to be shutdown
*/
if (plat_state == PSCI_STATE_OFF)
dcsw_op_all(DCCISW);
/*
* Plat. Management. Allow the platform to do its cluster
* specific bookkeeping e.g. turn off interconnect coherency,
* program the power controller etc.
*/
if (psci_plat_pm_ops->affinst_suspend) {
/*
* Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a
* platform might do. Also it allows us to keep the
* platform handler prototype the same.
*/
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
rc = psci_plat_pm_ops->affinst_suspend(mpidr,
psci_entrypoint,
ns_entrypoint,
cluster_node->level,
plat_state);
}
return rc;
}
static int psci_afflvl2_suspend(unsigned long mpidr,
aff_map_node *system_node,
unsigned long ns_entrypoint,
unsigned long context_id,
unsigned int power_state)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Cannot go beyond this */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
plat_state = psci_get_aff_phys_state(system_node);
/*
* Plat. Management : Allow the platform to do its bookkeeping
* at this affinity level
*/
if (psci_plat_pm_ops->affinst_suspend) {
/*
* Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a
* platform might do. Also it allows us to keep the
* platform handler prototype the same.
*/
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
rc = psci_plat_pm_ops->affinst_suspend(mpidr,
psci_entrypoint,
ns_entrypoint,
system_node->level,
plat_state);
}
return rc;
}
static const afflvl_suspend_handler psci_afflvl_suspend_handlers[] = {
psci_afflvl0_suspend,
psci_afflvl1_suspend,
psci_afflvl2_suspend,
};
/*******************************************************************************
* This function implements the core of the processing required to suspend a cpu.
* It's assumed that along with suspending the cpu, higher affinity levels will
* be suspended as far as possible. Suspending a cpu is equivalent to physically
* powering it down, but the cpu is still available to the OS for scheduling.
* We first need to determine the new state of all the affinity instances in
* the mpidr corresponding to the target cpu. Action will be taken on the basis
* of this new state. To do the state change we first need to acquire the locks
* for all the implemented affinity levels to be able to snapshot the system
* state. Then we need to start suspending affinity levels from the lowest to
* the highest (e.g. a cpu needs to be suspended before a cluster can be). To
* achieve this flow, we start acquiring the locks from the highest to the
* lowest affinity level. Once we reach affinity level 0, we do the state change
* followed by the actions corresponding to the new state for affinity level 0.
* Actions as per the updated state for higher affinity levels are performed as
* we unwind back to highest affinity level.
******************************************************************************/
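/*
 * Illustrative sketch (not part of the original source): on a hypothetical
 * single cluster platform where a cpu suspends itself up to affinity level 1,
 * the recursion below unfolds roughly as:
 *
 *   psci_afflvl_suspend(level 1)      -> lock(cluster)
 *     psci_afflvl_suspend(level 0)    -> lock(cpu)
 *                                        psci_change_state(0..max, SUSPEND)
 *                                        psci_afflvl0_suspend(), unlock(cpu)
 *   psci_afflvl1_suspend()            -> unlock(cluster)
 *
 * i.e. locks are acquired from the highest affinity level downwards while
 * the per-level suspend handlers run from level 0 upwards as the recursion
 * unwinds.
 */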
int psci_afflvl_suspend(unsigned long mpidr,
unsigned long entrypoint,
unsigned long context_id,
unsigned int power_state,
int cur_afflvl,
int tgt_afflvl)
{
int rc = PSCI_E_SUCCESS, level;
unsigned int prev_state, next_state;
aff_map_node *aff_node;
mpidr &= MPIDR_AFFINITY_MASK;
/*
* Some affinity instances at levels between the current and
* target levels could be absent in the mpidr. Skip them and
* start from the first present instance.
*/
level = psci_get_first_present_afflvl(mpidr,
cur_afflvl,
tgt_afflvl,
&aff_node);
/*
* Return if there are no more affinity instances beyond this
* level to process. Else ensure that the returned affinity
* node makes sense.
*/
if (aff_node == NULL)
return rc;
assert(level == aff_node->level);
/*
* This function acquires the lock corresponding to each
* affinity level so that state management can be done safely.
*/
bakery_lock_get(mpidr, &aff_node->lock);
/* Keep the old state and the next one handy */
prev_state = psci_get_state(aff_node->state);
next_state = PSCI_STATE_SUSPEND;
/*
* We start from the highest affinity level and work our way
* downwards to the lowest, i.e. MPIDR_AFFLVL0.
*/
if (aff_node->level == tgt_afflvl) {
psci_change_state(mpidr,
tgt_afflvl,
get_max_afflvl(),
next_state);
} else {
rc = psci_afflvl_suspend(mpidr,
entrypoint,
context_id,
power_state,
level - 1,
tgt_afflvl);
if (rc != PSCI_E_SUCCESS) {
psci_set_state(aff_node->state, prev_state);
goto exit;
}
}
/*
* Perform generic, architecture and platform specific
* handling
*/
rc = psci_afflvl_suspend_handlers[level](mpidr,
aff_node,
entrypoint,
context_id,
power_state);
if (rc != PSCI_E_SUCCESS) {
psci_set_state(aff_node->state, prev_state);
goto exit;
}
/*
* If all has gone as per plan then this cpu should be
* marked as SUSPEND
*/
if (level == MPIDR_AFFLVL0) {
next_state = psci_get_state(aff_node->state);
assert(next_state == PSCI_STATE_SUSPEND);
}
exit:
bakery_lock_release(mpidr, &aff_node->lock);
return rc;
}
/*******************************************************************************
* The following functions finish an earlier affinity suspend request. They
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
aff_map_node *cpu_node,
unsigned int prev_state)
{
unsigned int index, plat_state, rc = 0;
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Plat. management: Perform the platform specific actions
* before we change the state of the cpu e.g. enabling the
* gic or zeroing the mailbox register. If anything goes
* wrong then assert as there is no way to recover from this
* situation.
*/
if (psci_plat_pm_ops->affinst_suspend_finish) {
plat_state = psci_get_phys_state(prev_state);
rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
cpu_node->level,
plat_state);
assert(rc == PSCI_E_SUCCESS);
}
/* Get the index for restoring the re-entry information */
index = cpu_node->data;
/*
* Arch. management: Restore the stashed secure architectural
* context in the right order.
*/
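/*
 * Note: mair/tcr/ttbr0 are written before sctlr so that the translation
 * regime is fully programmed before the stashed sctlr value potentially
 * re-enables the MMU.
 */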
write_vbar(psci_secure_context[index].vbar);
write_mair(psci_secure_context[index].mair);
write_tcr(psci_secure_context[index].tcr);
write_ttbr0(psci_secure_context[index].ttbr);
write_sctlr(psci_secure_context[index].sctlr);
/* MMU and coherency should be enabled by now */
write_scr(psci_secure_context[index].scr);
write_cptr(psci_secure_context[index].cptr);
write_cpacr(psci_secure_context[index].cpacr);
write_cntfrq_el0(psci_secure_context[index].cntfrq);
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
* call to set this cpu on its way.
*/
rc = psci_get_ns_entry_info(index);
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
return rc;
}
static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
aff_map_node *cluster_node,
unsigned int prev_state)
{
unsigned int rc = 0;
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
if (psci_plat_pm_ops->affinst_suspend_finish) {
plat_state = psci_get_phys_state(prev_state);
rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
cluster_node->level,
plat_state);
assert(rc == PSCI_E_SUCCESS);
}
return rc;
}
static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
aff_map_node *system_node,
unsigned int prev_state)
{
int rc = PSCI_E_SUCCESS;
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the system e.g. enabling
* coherency at the interconnect depends upon the state with
* which the system was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
if (psci_plat_pm_ops->affinst_suspend_finish) {
plat_state = psci_get_phys_state(prev_state);
rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
system_node->level,
plat_state);
assert(rc == PSCI_E_SUCCESS);
}
return rc;
}
const afflvl_power_on_finisher psci_afflvl_suspend_finishers[] = {
psci_afflvl0_suspend_finish,
psci_afflvl1_suspend_finish,
psci_afflvl2_suspend_finish,
};
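/*
 * The table above is consumed on the warm boot path:
 * psci_aff_suspend_finish_entry (in the assembly entry code below) loads its
 * address into x23 and passes it to the common finisher,
 * psci_afflvl_power_on_finish(), which is expected to invoke each entry from
 * affinity level 0 up to the highest implemented level.
 */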
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <platform.h>
#include <psci.h>
#include <psci_private.h>
#include <asm_macros.S>
.globl psci_aff_on_finish_entry
.globl psci_aff_suspend_finish_entry
.globl __psci_cpu_off
.globl __psci_cpu_suspend
.section platform_code, "ax"; .align 3
/* -----------------------------------------------------
* This cpu has been physically powered up. Depending
* upon whether it was resumed from suspend or simply
* turned on, call the common power on finisher with
* the handlers (chosen depending upon original state).
* For ease, the finisher is called with coherent
* stacks. This allows the cluster/cpu finishers to
* enter coherency and enable the mmu without running
* into issues. We switch back to normal stacks once
* all this is done.
* -----------------------------------------------------
*/
psci_aff_on_finish_entry:
adr x23, psci_afflvl_on_finishers
b psci_aff_common_finish_entry
psci_aff_suspend_finish_entry:
adr x23, psci_afflvl_suspend_finishers
psci_aff_common_finish_entry:
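/* ---------------------------------------------
 * Callee-saved registers carry state across the
 * calls which follow: x19 holds the mpidr, x22
 * the common finisher, x23 the per-level handler
 * table chosen above and x21 the context id
 * returned by the finisher.
 * ---------------------------------------------
 */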
adr x22, psci_afflvl_power_on_finish
bl read_mpidr
mov x19, x0
bl platform_set_coherent_stack
/* ---------------------------------------------
* Call the finishers starting from affinity
* level 0.
* ---------------------------------------------
*/
bl get_max_afflvl
mov x3, x23
mov x2, x0
mov x0, x19
mov x1, #MPIDR_AFFLVL0
blr x22
mov x21, x0
/* --------------------------------------------
* Give ourselves a stack allocated in Normal
* -IS-WBWA memory
* --------------------------------------------
*/
mov x0, x19
bl platform_set_stack
/* --------------------------------------------
* Restore the context id value
* --------------------------------------------
*/
mov x0, x21
/* --------------------------------------------
* Jump back to the non-secure world assuming
* that the elr and spsr setup has been done
* by the finishers
* --------------------------------------------
*/
eret
_panic:
b _panic
/* -----------------------------------------------------
* The following two stubs give the calling cpu a
* coherent stack to allow flushing of caches without
* suffering from stack coherency issues
* -----------------------------------------------------
*/
__psci_cpu_off:
func_prologue
sub sp, sp, #0x10
stp x19, x20, [sp, #0]
mov x19, sp
bl read_mpidr
bl platform_set_coherent_stack
bl psci_cpu_off
mov x1, #PSCI_E_SUCCESS
cmp x0, x1
b.eq final_wfi
mov sp, x19
ldp x19, x20, [sp,#0]
add sp, sp, #0x10
func_epilogue
ret
__psci_cpu_suspend:
func_prologue
sub sp, sp, #0x20
stp x19, x20, [sp, #0]
stp x21, x22, [sp, #0x10]
mov x19, sp
mov x20, x0
mov x21, x1
mov x22, x2
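/* ---------------------------------------------
 * The original stack pointer (x19) and the
 * arguments (x20-x22) survive in callee-saved
 * registers across the switch to the coherent
 * stack below.
 * ---------------------------------------------
 */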
bl read_mpidr
bl platform_set_coherent_stack
mov x0, x20
mov x1, x21
mov x2, x22
bl psci_cpu_suspend
mov x1, #PSCI_E_SUCCESS
cmp x0, x1
b.eq final_wfi
mov sp, x19
ldp x21, x22, [sp,#0x10]
ldp x19, x20, [sp,#0]
add sp, sp, #0x20
func_epilogue
ret
final_wfi:
dsb sy
wfi
wfi_spill:
b wfi_spill
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci_private.h>
/*******************************************************************************
* PSCI frontend api for servicing SMCs. Described in the PSCI spec.
******************************************************************************/
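/*
 * For illustration only (an assumption about the dispatch path, not part of
 * this file): a normal world caller reaches these handlers via the SMC
 * conduit, e.g. for CPU_ON roughly:
 *
 *   x0 = <CPU_ON function id>, x1 = target_cpu,
 *   x2 = entrypoint, x3 = context_id; smc #0
 *
 * after which the SMC dispatcher unpacks the arguments into psci_cpu_on().
 */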
int psci_cpu_on(unsigned long target_cpu,
unsigned long entrypoint,
unsigned long context_id)
{
int rc;
unsigned int start_afflvl, target_afflvl;
/* Determine if the cpu exists or not */
rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
if (rc != PSCI_E_SUCCESS) {
goto exit;
}
start_afflvl = get_max_afflvl();
target_afflvl = MPIDR_AFFLVL0;
rc = psci_afflvl_on(target_cpu,
entrypoint,
context_id,
start_afflvl,
target_afflvl);
exit:
return rc;
}
unsigned int psci_version(void)
{
return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}
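/*
 * PSCI_MAJOR_VER is expected to occupy bits[31:16] and PSCI_MINOR_VER
 * bits[15:0] of the version word (an assumption; the macros are defined in
 * psci.h), so the bitwise OR above yields the spec's major.minor encoding.
 */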
int psci_cpu_suspend(unsigned int power_state,
unsigned long entrypoint,
unsigned long context_id)
{
int rc;
unsigned long mpidr;
unsigned int tgt_afflvl, pstate_type;
/* TODO: Standby states are not supported at the moment */
pstate_type = psci_get_pstate_type(power_state);
if (pstate_type == 0) {
rc = PSCI_E_INVALID_PARAMS;
goto exit;
}
/* Sanity check the requested state */
tgt_afflvl = psci_get_pstate_afflvl(power_state);
if (tgt_afflvl > MPIDR_MAX_AFFLVL) {
rc = PSCI_E_INVALID_PARAMS;
goto exit;
}
mpidr = read_mpidr();
rc = psci_afflvl_suspend(mpidr,
entrypoint,
context_id,
power_state,
tgt_afflvl,
MPIDR_AFFLVL0);
exit:
if (rc != PSCI_E_SUCCESS)
assert(rc == PSCI_E_INVALID_PARAMS);
return rc;
}
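/*
 * Illustrative note (based on the PSCI v0.2 power_state encoding, an
 * assumption rather than something defined in this file): the state id
 * lives in bits[15:0], the state type in bit[16] (0 = standby, 1 = power
 * down) and the target affinity level in bits[25:24]. A power down request
 * for affinity level 1 would therefore resemble:
 *
 *   power_state = (1 << 24) | (1 << 16) | state_id;
 */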
int psci_cpu_off(void)
{
int rc;
unsigned long mpidr;
int target_afflvl = get_max_afflvl();
mpidr = read_mpidr();
/*
* Traverse from the highest to the lowest affinity level. When the
* lowest affinity level is hit, all the locks are acquired. State
* management is done immediately, followed by cpu, cluster, ...,
* target_afflvl specific actions as this function unwinds back.
*/
rc = psci_afflvl_off(mpidr, target_afflvl, MPIDR_AFFLVL0);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_DENIED);
}
return rc;
}
int psci_affinity_info(unsigned long target_affinity,
unsigned int lowest_affinity_level)
{
int rc = PSCI_E_INVALID_PARAMS;
unsigned int aff_state;
aff_map_node *node;
if (lowest_affinity_level > get_max_afflvl()) {
goto exit;
}
node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
if (node && (node->state & PSCI_AFF_PRESENT)) {
aff_state = psci_get_state(node->state);
/* A suspended cpu is available & on for the OS */
if (aff_state == PSCI_STATE_SUSPEND) {
aff_state = PSCI_STATE_ON;
}
rc = aff_state;
}
exit:
return rc;
}
/* Unimplemented */
int psci_migrate(unsigned int target_cpu)
{
return PSCI_E_NOT_SUPPORTED;
}
/* Unimplemented */
unsigned int psci_migrate_info_type(void)
{
return PSCI_TOS_NOT_PRESENT;
}
unsigned long psci_migrate_info_up_cpu(void)
{
/*
* Return value of this currently unsupported call depends upon
* what psci_migrate_info_type() returns.
*/
return PSCI_E_SUCCESS;
}
/* Unimplemented */
void psci_system_off(void)
{
assert(0);
}
/* Unimplemented */
void psci_system_reset(void)
{
assert(0);
}