Commit 937108a0 authored by danh-arm, committed by GitHub

Merge pull request #678 from soby-mathew/sm/PSCI_AArch32

Introduce AArch32 support for PSCI library
parents 974603b5 9d29c227
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl spin_lock
.globl spin_unlock
func spin_lock
mov r2, #1
1:
ldrex r1, [r0]
cmp r1, #0
wfene
strexeq r1, r2, [r0]
cmpeq r1, #0
bne 1b
dmb
bx lr
endfunc spin_lock
func spin_unlock
mov r1, #0
stl r1, [r0]
bx lr
endfunc spin_unlock
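For context, a minimal usage sketch of these primitives (illustrative, not part of this commit). It assumes the spinlock_t type and the spin_lock()/spin_unlock() declarations from TF-A's spinlock.h; the shared counter is a placeholder. On AArch32 the lock is acquired by the LDREX/STREX loop above and released with the STL store-release; the AArch64 variant below does the same with LDAXR/STXR and STLR.

#include <spinlock.h>

static spinlock_t state_lock;		/* zero-initialised, i.e. unlocked */
static unsigned int shared_count;	/* hypothetical shared state */

void increment_shared_count(void)
{
	spin_lock(&state_lock);		/* spins (with WFE) until acquired */
	shared_count++;			/* critical section */
	spin_unlock(&state_lock);	/* store-release frees the lock */
}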
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl spin_lock
.globl spin_unlock
func spin_lock
mov w2, #1
sevl
l1: wfe
l2: ldaxr w1, [x0]
cbnz w1, l1
stxr w1, w2, [x0]
cbnz w1, l2
ret
endfunc spin_lock
func spin_unlock
stlr wzr, [x0]
ret
endfunc spin_unlock
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,25 +28,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
.globl spin_lock
.globl spin_unlock
func spin_lock
mov w2, #1
sevl
l1: wfe
l2: ldaxr w1, [x0]
cbnz w1, l1
stxr w1, w2, [x0]
cbnz w1, l2
ret
endfunc spin_lock
func spin_unlock
stlr wzr, [x0]
ret
endfunc spin_unlock
#if !ERROR_DEPRECATED
#include "./aarch64/spinlock.S"
#endif
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
#include <platform_def.h>
#include <psci.h>
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
.globl psci_power_down_wfi
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
*
* This function performs cache maintenance for the specified power
* level. The levels of cache affected are determined by the power
* level which is passed as the argument, i.e. level 0 results
* in a flush of the L1 cache. Both the L1 and L2 caches are flushed
* for a higher power level.
*
* Additionally, this function also ensures that stack memory is correctly
* flushed out to avoid coherency issues due to a change in its memory
* attributes after the data cache is disabled.
* -----------------------------------------------------------------------
*/
func psci_do_pwrdown_cache_maintenance
push {r4, lr}
/* ----------------------------------------------
* Turn OFF cache and do stack maintenance
* prior to cpu operations. This sequence is
* different from AArch64 because in AArch32 the
* assembler routines for cpu operations utilize
* the stack, whereas in AArch64 they do not.
* ----------------------------------------------
*/
mov r4, r0
bl do_stack_maintenance
/* ---------------------------------------------
* Determine how many levels of cache will be
* subject to cache maintenance. Power level
* 0 implies that only the cpu is being powered
* down. Only the L1 data cache needs to be
* flushed to the PoU in this case. For a higher
* power level we are assuming that a flush
* of L1 data and L2 unified cache is enough.
* This information should be provided by the
* platform.
* ---------------------------------------------
*/
cmp r4, #PSCI_CPU_PWR_LVL
pop {r4, lr}
beq prepare_core_pwr_dwn
b prepare_cluster_pwr_dwn
endfunc psci_do_pwrdown_cache_maintenance
/* -----------------------------------------------------------------------
* void psci_do_pwrup_cache_maintenance(void);
*
* This function performs cache maintenance after this cpu is powered up.
* Currently, this involves managing the used stack memory before turning
* on the data cache.
* -----------------------------------------------------------------------
*/
func psci_do_pwrup_cache_maintenance
push {lr}
/* ---------------------------------------------
* Ensure any in-flight stack writes have made it
* to main memory.
* ---------------------------------------------
*/
dmb st
/* ---------------------------------------------
* Calculate and store the size of the used
* stack memory in r1. Calculate and store the
* stack base address in r0.
* ---------------------------------------------
*/
bl plat_get_my_stack
mov r1, sp
sub r1, r0, r1
mov r0, sp
bl inv_dcache_range
/* ---------------------------------------------
* Enable the data cache.
* ---------------------------------------------
*/
ldcopr r0, SCTLR
orr r0, r0, #SCTLR_C_BIT
stcopr r0, SCTLR
isb
pop {pc}
endfunc psci_do_pwrup_cache_maintenance
/* ---------------------------------------------
* void do_stack_maintenance(void)
* Do stack maintenance by flushing the used
* stack to main memory and invalidating the
* remainder.
* ---------------------------------------------
*/
func do_stack_maintenance
push {r4, lr}
bl plat_get_my_stack
/* Turn off the D-cache */
ldcopr r1, SCTLR
bic r1, #SCTLR_C_BIT
stcopr r1, SCTLR
isb
/* ---------------------------------------------
* Calculate and store the size of the used
* stack memory in r1.
* ---------------------------------------------
*/
mov r4, r0
mov r1, sp
sub r1, r0, r1
mov r0, sp
bl flush_dcache_range
/* ---------------------------------------------
* Calculate and store the size of the unused
* stack memory in r1. Calculate and store the
* stack base address in r0.
* ---------------------------------------------
*/
sub r0, r4, #PLATFORM_STACK_SIZE
sub r1, sp, r0
bl inv_dcache_range
pop {r4, pc}
endfunc do_stack_maintenance
/* -----------------------------------------------------------------------
* This function is called to indicate to the power controller that it
* is safe to power down this cpu. It should not exit the wfi and will
* be released from reset upon power up.
* -----------------------------------------------------------------------
*/
func psci_power_down_wfi
dsb sy // ensure write buffer empty
wfi
bl plat_panic_handler
endfunc psci_power_down_wfi
@@ -592,6 +592,53 @@ int psci_validate_mpidr(u_register_t mpidr)
* This function determines the full entrypoint information for the requested
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
u_register_t ep_attr;
unsigned int aif, ee, mode;
u_register_t scr = read_scr();
u_register_t ns_sctlr, sctlr;
/* Switch to the non-secure state */
write_scr(scr | SCR_NS_BIT);
isb();
ns_sctlr = read_sctlr();
sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
/* Return to original state */
write_scr(scr);
isb();
ee = 0;
ep_attr = NON_SECURE | EP_ST_DISABLE;
if (sctlr & SCTLR_EE_BIT) {
ep_attr |= EP_EE_BIG;
ee = 1;
}
SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
ep->pc = entrypoint;
memset(&ep->args, 0, sizeof(ep->args));
ep->args.arg0 = context_id;
mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
/*
* TODO: If HYP mode is not implemented, choose the async.
* exception bits according to the values of the SCR.{AW, FW} bits
*/
aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
return PSCI_E_SUCCESS;
}
#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
@@ -646,6 +693,7 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
return PSCI_E_SUCCESS;
}
#endif
/*******************************************************************************
* This function validates the entrypoint with the platform layer if the
......
@@ -29,11 +29,10 @@
#
PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
lib/el3_runtime/aarch64/context.S \
lib/el3_runtime/aarch64/cpu_data.S \
lib/el3_runtime/aarch64/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/locks/exclusive/spinlock.S \
lib/el3_runtime/${ARCH}/cpu_data.S \
lib/el3_runtime/${ARCH}/context_mgmt.c \
lib/cpus/${ARCH}/cpu_helpers.S \
lib/locks/exclusive/${ARCH}/spinlock.S \
lib/psci/psci_off.c \
lib/psci/psci_on.c \
lib/psci/psci_suspend.c \
@@ -41,7 +40,11 @@ PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
lib/psci/psci_main.c \
lib/psci/psci_setup.c \
lib/psci/psci_system_off.c \
lib/psci/aarch64/psci_helpers.S
lib/psci/${ARCH}/psci_helpers.S
ifeq (${ARCH}, aarch64)
PSCI_LIB_SOURCES += lib/el3_runtime/aarch64/context.S
endif
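With the sources parameterised on ${ARCH}, the same PSCI library builds for either architecture from the ARCH setting on the make command line; only the AArch64 build pulls in context.S. An illustrative invocation (the exact set of options required depends on the platform makefiles):

    make ARCH=aarch32 PLAT=fvp all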
ifeq (${USE_COHERENT_MEM}, 1)
PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
......
@@ -278,3 +278,15 @@ void psci_arch_setup(void)
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
}
/******************************************************************************
* PSCI Library interface to initialize the cpu context for the next
* non-secure image during cold boot. The relevant registers in the cpu context
* need to be retrieved and programmed on return from this interface.
*****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
cm_init_my_context(next_image_info);
cm_prepare_el3_exit(NON_SECURE);
}
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
/*
* The virtual address space size must be a power of two. As we start the initial
* lookup at level 1, it must also be between 2 GB and 4 GB. See section
* G4.6.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information.
*/
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 32) &&
IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
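/*
 * Worked example (illustrative): with the maximum 4 GB space,
 * ADDR_SPACE_SIZE = 1ull << 32 and a level-1 granularity of 1 GB per
 * entry (L1_XLAT_ADDRESS_SHIFT == 30 in the long-descriptor format),
 * NUM_L1_ENTRIES = 4 and the table occupies 4 * 8 = 32 bytes. Aligning
 * the table to its own size satisfies the TTBR0 alignment requirement
 * of the long-descriptor translation table format.
 */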
void init_xlat_tables(void)
{
unsigned long long max_pa;
uintptr_t max_va;
print_mmap();
init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
assert(max_va < ADDR_SPACE_SIZE);
}
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the
* page-tables have already been created.
******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
unsigned int mair0, ttbcr, sctlr;
uint64_t ttbr0;
assert(IS_IN_SECURE());
assert((read_sctlr() & SCTLR_M_BIT) == 0);
/* Set attributes in the right indices of the MAIR */
mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
ATTR_IWBWA_OWBWA_NTR_INDEX);
mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
ATTR_NON_CACHEABLE_INDEX);
write_mair0(mair0);
/* Invalidate TLBs at the current exception level */
tlbiall();
/*
* Set TTBCR bits as well. Set TTBR0 table properties as Inner
* & outer WBWA & shareable. Disable TTBR1.
*/
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
TTBCR_RGN0_INNER_WBA |
(32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE));
ttbcr |= TTBCR_EPD1_BIT;
write_ttbcr(ttbcr);
/* Set TTBR0 bits as well */
ttbr0 = (uintptr_t) l1_xlation_table;
write64_ttbr0(ttbr0);
write64_ttbr1(0);
/*
* Ensure all translation table writes have drained
* into memory, the TLB invalidation is complete,
* and translation register writes are committed
* before enabling the MMU
*/
dsb();
isb();
sctlr = read_sctlr();
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
if (flags & DISABLE_DCACHE)
sctlr &= ~SCTLR_C_BIT;
else
sctlr |= SCTLR_C_BIT;
write_sctlr(sctlr);
/* Ensure the MMU enable takes effect immediately */
isb();
}
@@ -289,17 +289,17 @@ static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
if (!mm->size)
return attr; /* Reached end of list */
if (mm->base_va >= base_va + size)
if (mm->base_va > base_va + size - 1)
return attr; /* Next region is after area so end */
if (mm->base_va + mm->size <= base_va)
if (mm->base_va + mm->size - 1 < base_va)
continue; /* Next region has already been overtaken */
if (mm->attr == attr)
continue; /* Region doesn't override attribs so skip */
if (mm->base_va > base_va ||
mm->base_va + mm->size < base_va + size)
mm->base_va + mm->size - 1 < base_va + size - 1)
return -1; /* Region doesn't fully cover our area */
attr = mm->attr;
@@ -328,7 +328,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
if (!mm->size) {
/* Done mapping regions; finish zeroing the table */
desc = INVALID_DESC;
} else if (mm->base_va + mm->size <= base_va) {
} else if (mm->base_va + mm->size - 1 < base_va) {
/* This area is after the region so get next region */
++mm;
continue;
@@ -337,7 +337,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
debug_print("%s VA:%p size:0x%x ", get_level_spacer(level),
(void *)base_va, level_size);
if (mm->base_va >= base_va + level_size) {
if (mm->base_va > base_va + level_size - 1) {
/* Next region is after this area. Nothing to map yet */
desc = INVALID_DESC;
} else {
@@ -369,7 +369,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
*table++ = desc;
base_va += level_size;
} while ((base_va & level_index_mask) && (base_va < ADDR_SPACE_SIZE));
} while ((base_va & level_index_mask) && (base_va - 1 < ADDR_SPACE_SIZE - 1));
return mm;
}
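The comparison rewrites in this hunk guard against unsigned wrap-around: for a region or area that extends to the very top of the address space, base_va + size overflows to 0, so the old >=/<= forms misclassify it. Comparing against the last address of the range, base + size - 1, which cannot wrap for a non-zero size, preserves the intended ordering. A small sketch of the idiom (names are placeholders, not from this patch):

#include <stdbool.h>
#include <stdint.h>

/*
 * True if the region starting at region_base lies entirely after the
 * area [area_base, area_base + area_size - 1]. Safe even when the area
 * ends at the top of the address space, where area_base + area_size
 * would wrap to 0. Assumes area_size != 0.
 */
static bool region_is_after(uintptr_t region_base,
			    uintptr_t area_base, uintptr_t area_size)
{
	uintptr_t area_last = area_base + area_size - 1;

	return region_base > area_last;
}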
......
@@ -31,8 +31,10 @@
PLAT_INCLUDES += -Iinclude/plat/arm/board/common/ \
-Iinclude/plat/arm/board/common/drivers
PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/pl011_console.S \
plat/arm/board/common/aarch64/board_arm_helpers.S
PLAT_BL_COMMON_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S
ifeq (${ARCH}, aarch64)
PLAT_BL_COMMON_SOURCES += plat/arm/board/common/aarch64/board_arm_helpers.S
endif
BL1_SOURCES += plat/arm/board/common/drivers/norflash/norflash.c
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>
#include "../drivers/pwrc/fvp_pwrc.h"
#include "../fvp_def.h"
.globl plat_get_my_entrypoint
.globl plat_is_my_cpu_primary
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between a cold and warm
* boot. On FVP, this information can be queried from the power
* controller. The Power Control SYS Status Register (PSYSR) indicates
* the wake-up reason for the CPU.
*
* For a cold boot, return 0.
* For a warm boot, read the mailbox and return the address it contains.
*
* TODO: PSYSR is a common register and should be
* accessed using locks. Since it is not possible
* to use locks immediately after a cold reset,
* we are relying on the fact that after a cold
* reset all cpus will read the same WK field.
* ---------------------------------------------------------------------
*/
func plat_get_my_entrypoint
/* ---------------------------------------------------------------------
* When the PSYSR.WK field indicates either "Wake by PPONR" or "Wake by GIC
* WakeRequest signal" then it is a warm boot.
* ---------------------------------------------------------------------
*/
ldcopr r2, MPIDR
ldr r1, =PWRC_BASE
str r2, [r1, #PSYSR_OFF]
ldr r2, [r1, #PSYSR_OFF]
ubfx r2, r2, #PSYSR_WK_SHIFT, #PSYSR_WK_WIDTH
cmp r2, #WKUP_PPONR
beq warm_reset
cmp r2, #WKUP_GICREQ
beq warm_reset
/* Cold reset */
mov r0, #0
bx lr
warm_reset:
/* ---------------------------------------------------------------------
* A mailbox is maintained in the trusted SRAM. It is flushed out of the
* caches after every update made via normal memory mappings, so it is
* safe to read it here with SO attributes.
* ---------------------------------------------------------------------
*/
ldr r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE
ldr r0, [r0]
cmp r0, #0
beq _panic
bx lr
/* ---------------------------------------------------------------------
* The power controller indicates this is a warm reset but the mailbox
* is empty. This should never happen!
* ---------------------------------------------------------------------
*/
_panic:
b _panic
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* unsigned int plat_is_my_cpu_primary (void);
*
* Find out whether the current cpu is the primary
* cpu.
* -----------------------------------------------------
*/
func plat_is_my_cpu_primary
ldcopr r0, MPIDR
ldr r1, =(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
and r0, r1
cmp r0, #FVP_PRIMARY_CPU
moveq r0, #1
movne r0, #0
bx lr
endfunc plat_is_my_cpu_primary
@@ -121,6 +121,9 @@ const mmap_region_t plat_arm_mmap[] = {
#endif
#if IMAGE_BL32
const mmap_region_t plat_arm_mmap[] = {
#ifdef AARCH32
ARM_MAP_SHARED_RAM,
#endif
V2M_MAP_IOFPGA,
MAP_DEVICE0,
MAP_DEVICE1,
......
@@ -69,6 +69,9 @@ FVP_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
plat/common/plat_gicv2.c \
plat/arm/common/arm_gicv2.c
else ifeq (${FVP_USE_GIC_DRIVER}, FVP_GICV3_LEGACY)
ifeq (${ARCH}, aarch32)
$(error "GICV3 Legacy driver not supported for AArch32 build")
endif
FVP_GIC_SOURCES := drivers/arm/gic/arm_gic.c \
drivers/arm/gic/gic_v2.c \
drivers/arm/gic/gic_v3.c \
@@ -98,12 +101,15 @@ PLAT_INCLUDES := -Iplat/arm/board/fvp/include
PLAT_BL_COMMON_SOURCES := plat/arm/board/fvp/fvp_common.c
FVP_CPU_LIBS := lib/cpus/aarch64/aem_generic.S \
lib/cpus/aarch64/cortex_a35.S \
FVP_CPU_LIBS := lib/cpus/${ARCH}/aem_generic.S
ifeq (${ARCH}, aarch64)
FVP_CPU_LIBS += lib/cpus/aarch64/cortex_a35.S \
lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
lib/cpus/aarch64/cortex_a72.S \
lib/cpus/aarch64/cortex_a73.S
endif
BL1_SOURCES += drivers/io/io_semihosting.c \
lib/semihosting/semihosting.c \
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <plat_arm.h>
#include "../fvp_private.h"
void sp_min_early_platform_setup(void)
{
arm_sp_min_early_platform_setup();
/* Initialize the platform config for future decision making */
fvp_config_setup();
/*
* Initialize the correct interconnect for this cluster during cold
* boot. No need for locks as no other CPU is active.
*/
fvp_interconnect_init();
/*
* Enable coherency in interconnect for the primary CPU's cluster.
* Earlier bootloader stages might already do this (e.g. Trusted
* Firmware's BL1 does it) but we can't assume so. There is no harm in
* executing this code twice anyway.
* FVP PSCI code will enable coherency for other clusters.
*/
fvp_interconnect_enable();
}
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SP_MIN source files specific to FVP platform
BL32_SOURCES += plat/arm/board/fvp/aarch32/fvp_helpers.S \
plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c \
plat/arm/board/fvp/fvp_pm.c \
plat/arm/board/fvp/fvp_topology.c \
plat/arm/board/fvp/sp_min/fvp_sp_min_setup.c \
${FVP_CPU_LIBS} \
${FVP_GIC_SOURCES} \
${FVP_INTERCONNECT_SOURCES} \
${FVP_SECURITY_SOURCES}
include plat/arm/common/sp_min/arm_sp_min.mk
\ No newline at end of file
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
#include <platform_def.h>
.weak plat_arm_calc_core_pos
.weak plat_my_core_pos
/* -----------------------------------------------------
* unsigned int plat_my_core_pos(void)
* This function uses the plat_arm_calc_core_pos()
* definition to get the index of the calling CPU.
* -----------------------------------------------------
*/
func plat_my_core_pos
ldcopr r0, MPIDR
b plat_arm_calc_core_pos
endfunc plat_my_core_pos
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
* Helper function to calculate the core position.
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func plat_arm_calc_core_pos
and r1, r0, #MPIDR_CPU_MASK
and r0, r0, #MPIDR_CLUSTER_MASK
add r0, r1, r0, LSR #6
bx lr
endfunc plat_arm_calc_core_pos
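The LSR #6 above encodes the multiply by four: MPIDR_CLUSTER_MASK keeps Aff1 in bits [15:8], and shifting right by 6 rather than 8 leaves the cluster ID pre-multiplied by 4, giving CorePos = (ClusterId * 4) + CoreId. A C equivalent (illustrative only; calc_core_pos is a placeholder name):

static unsigned int calc_core_pos(u_register_t mpidr)
{
	unsigned int core_id = mpidr & MPIDR_CPU_MASK;			/* Aff0, bits [7:0] */
	unsigned int cluster_x4 = (mpidr & MPIDR_CLUSTER_MASK) >> 6;	/* (Aff1 >> 8) * 4 */

	return cluster_x4 + core_id;
}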
@@ -134,6 +134,7 @@ uint32_t arm_get_spsr_for_bl32_entry(void)
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
uint32_t arm_get_spsr_for_bl33_entry(void)
{
unsigned long el_status;
@@ -154,6 +155,28 @@ uint32_t arm_get_spsr_for_bl33_entry(void)
spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#else
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
uint32_t arm_get_spsr_for_bl33_entry(void)
{
unsigned int hyp_status, mode, spsr;
hyp_status = GET_VIRT_EXT(read_id_pfr1());
mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
/*
* TODO: Consider the possibility of specifying the SPSR in
* the FIP ToC and allowing the platform to have a say as
* well.
*/
spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
/*******************************************************************************
* Configures access to the system counter timer module.
......
@@ -28,23 +28,30 @@
# POSSIBILITY OF SUCH DAMAGE.
#
# On ARM standard platforms, the TSP can execute from Trusted SRAM, Trusted
# DRAM (if available) or the TZC secured area of DRAM.
# Trusted SRAM is the default.
ARM_TSP_RAM_LOCATION := tsram
ifeq (${ARM_TSP_RAM_LOCATION}, tsram)
ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID
else ifeq (${ARM_TSP_RAM_LOCATION}, tdram)
ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_DRAM_ID
else ifeq (${ARM_TSP_RAM_LOCATION}, dram)
ARM_TSP_RAM_LOCATION_ID = ARM_DRAM_ID
else
$(error "Unsupported ARM_TSP_RAM_LOCATION value")
endif
ifeq (${ARCH}, aarch64)
# On ARM standard platforms, the TSP can execute from Trusted SRAM, Trusted
# DRAM (if available) or the TZC secured area of DRAM.
# Trusted SRAM is the default.
ARM_TSP_RAM_LOCATION := tsram
ifeq (${ARM_TSP_RAM_LOCATION}, tsram)
ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_SRAM_ID
else ifeq (${ARM_TSP_RAM_LOCATION}, tdram)
ARM_TSP_RAM_LOCATION_ID = ARM_TRUSTED_DRAM_ID
else ifeq (${ARM_TSP_RAM_LOCATION}, dram)
ARM_TSP_RAM_LOCATION_ID = ARM_DRAM_ID
else
$(error "Unsupported ARM_TSP_RAM_LOCATION value")
endif
# Process flags
$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
# Process flags
$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
# Process ARM_BL31_IN_DRAM flag
ARM_BL31_IN_DRAM := 0
$(eval $(call assert_boolean,ARM_BL31_IN_DRAM))
$(eval $(call add_define,ARM_BL31_IN_DRAM))
endif
# For the original power-state parameter format, the State-ID can be encoded
# according to the recommended encoding or zero. This flag determines which
@@ -83,7 +90,7 @@ $(eval $(call assert_boolean,ARM_BL31_IN_DRAM))
$(eval $(call add_define,ARM_BL31_IN_DRAM))
# Enable PSCI_STAT_COUNT/RESIDENCY APIs on ARM platforms
ENABLE_PSCI_STAT = 1
ENABLE_PSCI_STAT := 1
# On ARM platforms, separate the code and read-only data sections to allow
# mapping the former as executable and the latter as execute-never.
@@ -91,15 +98,17 @@ SEPARATE_CODE_AND_RODATA := 1
PLAT_INCLUDES += -Iinclude/common/tbbr \
-Iinclude/plat/arm/common \
-Iinclude/plat/arm/common/aarch64
-Iinclude/plat/arm/common
ifeq (${ARCH}, aarch64)
PLAT_INCLUDES += -Iinclude/plat/arm/common/aarch64
endif
PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c \
plat/arm/common/aarch64/arm_helpers.S \
lib/xlat_tables/${ARCH}/xlat_tables.c \
plat/arm/common/${ARCH}/arm_helpers.S \
plat/arm/common/arm_common.c \
plat/common/aarch64/plat_common.c
plat/common/${ARCH}/plat_common.c
BL1_SOURCES += drivers/arm/sp805/sp805.c \
drivers/io/io_fip.c \
......
@@ -77,7 +77,7 @@ void plat_arm_gic_driver_init(void)
* can use GIC system registers to manage interrupts and does
* not need GIC interface base addresses to be configured.
*/
#if IMAGE_BL31
#if (AARCH32 && IMAGE_BL32) || (IMAGE_BL31 && !AARCH32)
gicv3_driver_init(&arm_gic_data);
#endif
}
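Note that on an AArch32 build the EL3 runtime image is SP_MIN (BL32) rather than BL31, which is why the guard above initialises the GICv3 driver in whichever image serves as the runtime firmware for the given architecture.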
......
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SP MIN source files common to ARM standard platforms
BL32_SOURCES += plat/arm/common/arm_pm.c \
plat/arm/common/arm_topology.c \
plat/arm/common/sp_min/arm_sp_min_setup.c \
plat/common/aarch32/platform_mp_stack.S \
plat/common/plat_psci_common.c