Commit 4f6ad66a authored by Achin Gupta, committed by James Morrissey

ARMv8 Trusted Firmware release v0.2

#
# Copyright (c) 2013, ARM Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Decrease the verbosity of the make script.
# It can be made verbose by passing V=1 on the make command line.
ifdef V
KBUILD_VERBOSE = $(V)
else
KBUILD_VERBOSE = 0
endif
ifeq "$(KBUILD_VERBOSE)" "0"
Q=@
else
Q=
endif
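# For example, "make V=1" prints the full compiler/assembler/linker command
# lines instead of the terse CC/AS/LD messages echoed by the rules below.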
DEBUG ?= 0
BL_COMMON_OBJS = misc_helpers.o cache_helpers.o tlb_helpers.o \
semihosting_call.o mmio.o pl011.o semihosting.o \
std.o bl_common.o platform_helpers.o sysreg_helpers.o
ARCH := aarch64
all: $(patsubst %,%.bin,bl1 bl2 bl31) ;
#$(info $(filter bl2.%, $(MAKECMDGOALS)))
#$(info $(filter bl1.%, $(MAKECMDGOALS)))
#$(info $(MAKECMDGOALS))
$(info Including bl1.mk)
include bl1/bl1.mk
$(info Including bl2.mk)
include bl2/bl2.mk
$(info Including bl31.mk)
include bl31/bl31.mk
OBJS += $(BL_COMMON_OBJS)
INCLUDES += -Ilib/include/ -Iinclude/aarch64/ -Iinclude/ \
-Idrivers/arm/interconnect/cci-400/ \
-Idrivers/arm/peripherals/pl011/ \
-Iplat/fvp -Idrivers/power \
-Iarch/system/gic -Icommon/psci
ASFLAGS += -D__ASSEMBLY__ $(INCLUDES)
CFLAGS := -Wall -std=c99 -c -Os -DDEBUG=$(DEBUG) $(INCLUDES) ${CFLAGS}
LDFLAGS += -O1
BL1_LDFLAGS := -Map=$(BL1_MAPFILE) --script $(BL1_LINKERFILE) --entry=$(BL1_ENTRY_POINT)
BL2_LDFLAGS := -Map=$(BL2_MAPFILE) --script $(BL2_LINKERFILE) --entry=$(BL2_ENTRY_POINT)
BL31_LDFLAGS := -Map=$(BL31_MAPFILE) --script $(BL31_LINKERFILE) --entry=$(BL31_ENTRY_POINT)
vpath %.ld.S bl1:bl2:bl31
vpath %.c bl1:bl2:bl31
vpath %.c bl1/${ARCH}:bl2/${ARCH}:bl31/${ARCH}
vpath %.S bl1/${ARCH}:bl2/${ARCH}:bl31/${ARCH}
ifneq ($(DEBUG), 0)
#CFLAGS += -g -O0
CFLAGS += -g
# -save-temps -fverbose-asm
ASFLAGS += -g -Wa,--gdwarf-2
endif
CC = $(CROSS_COMPILE)gcc
CPP = $(CROSS_COMPILE)cpp
AS = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
LD = $(CROSS_COMPILE)ld
OC = $(CROSS_COMPILE)objcopy
OD = $(CROSS_COMPILE)objdump
NM = $(CROSS_COMPILE)nm
PP = $(CROSS_COMPILE)gcc -E $(CFLAGS)
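# CROSS_COMPILE is expected to name a bare-metal AArch64 toolchain prefix,
# e.g. CROSS_COMPILE=aarch64-none-elf- (the exact prefix depends on the
# toolchain installation).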
distclean: clean
@echo " DISTCLEAN"
$(Q)rm -rf *.zi
$(Q)rm -rf *.dump
$(Q)rm -rf *.bin
$(Q)rm -f *.axf
$(Q)rm -f *.i *.s
$(Q)rm -f *.ar
$(Q)rm -f *.map
$(Q)rm -f *.scf
$(Q)rm -f *.txt
$(Q)rm -f *.elf
$(Q)rm -rf *.bin
$(Q)rm -f $(LISTFILE)
clean:
@echo " CLEAN"
$(Q)rm -f *.o *.ld
.PHONY: dump
dump:
@echo " OBJDUMP"
$(OD) -d bl1.elf > bl1.dump
$(OD) -d bl2.elf > bl2.dump
$(OD) -d bl31.elf > bl31.dump
%.o: %.S
@echo " AS $<"
$(Q)$(AS) $(ASFLAGS) -c $< -o $@
%.o: %.c
@echo " CC $<"
$(Q)$(CC) $(CFLAGS) -c $< -o $@
%.ld: %.ld.S
@echo " LDS $<"
$(Q)$(AS) $(ASFLAGS) -P -E $< -o $@
bl1.elf: $(OBJS) $(BL1_OBJS) bl1.ld
@echo " LD $@"
$(Q)$(LD) -o $@ $(LDFLAGS) $(BL1_LDFLAGS) $(OBJS) $(BL1_OBJS)
@echo "Built $@ successfully"
@echo
bl2.elf: $(OBJS) $(BL2_OBJS) bl2.ld
@echo " LD $@"
$(Q)$(LD) -o $@ $(LDFLAGS) $(BL2_LDFLAGS) $(OBJS) $(BL2_OBJS)
@echo "Built $@ successfully"
@echo
bl31.elf: $(OBJS) $(BL31_OBJS) bl31.ld
@echo " LD $@"
$(Q)$(LD) -o $@ $(LDFLAGS) $(BL31_LDFLAGS) $(OBJS) $(BL31_OBJS)
@echo "Built $@ successfully"
@echo
%.bin: %.elf
$(OC) -O binary $< $@
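# Each BL image is linked into an ELF file against its own linker script and
# entry point, then converted to a raw binary with objcopy; the default "all"
# target therefore produces bl1.bin, bl2.bin and bl31.bin.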
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
.weak cpu_reset_handler
.section aarch64_code, "ax"; .align 3
cpu_reset_handler:; .type cpu_reset_handler, %function
mov x19, x30 // lr
/* ---------------------------------------------
* As a bare minimum, enable the SMP bit and the
* instruction cache for all AArch64 processors.
* Also set the exception vector to something
* sane.
* ---------------------------------------------
*/
adr x0, early_exceptions
bl write_vbar
bl read_midr
lsr x0, x0, #MIDR_PN_SHIFT
and x0, x0, #MIDR_PN_MASK
cmp x0, #MIDR_PN_A57
b.eq smp_setup_begin
cmp x0, #MIDR_PN_A53
b.ne smp_setup_end
smp_setup_begin:
bl read_cpuectlr
orr x0, x0, #CPUECTLR_SMP_BIT
bl write_cpuectlr
smp_setup_end:
bl read_sctlr
orr x0, x0, #SCTLR_I_BIT
bl write_sctlr
ret x19
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
.globl read_icc_sre_el1
.globl read_icc_sre_el2
.globl read_icc_sre_el3
.globl write_icc_sre_el1
.globl write_icc_sre_el2
.globl write_icc_sre_el3
.globl write_icc_pmr_el1
/*
* Register definitions used by GCC for GICv3 access.
* These are defined by ARMCC, so keep them in the GCC specific code for now.
*/
#define ICC_SRE_EL1 S3_0_C12_C12_5
#define ICC_SRE_EL2 S3_4_C12_C9_5
#define ICC_SRE_EL3 S3_6_C12_C12_5
#define ICC_CTLR_EL1 S3_0_C12_C12_4
#define ICC_CTLR_EL3 S3_6_C12_C12_4
#define ICC_PMR_EL1 S3_0_C4_C6_0
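/*
* The S<op0>_<op1>_C<CRn>_C<CRm>_<op2> form above is the generic system
* register encoding accepted by the assembler (e.g. ICC_SRE_EL1 is op0=3,
* op1=0, CRn=12, CRm=12, op2=5); it avoids relying on the assembler knowing
* the architectural ICC_* register names.
*/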
.section platform_code, "ax"; .align 3
read_icc_sre_el1:; .type read_icc_sre_el1, %function
mrs x0, ICC_SRE_EL1
ret
read_icc_sre_el2:; .type read_icc_sre_el2, %function
mrs x0, ICC_SRE_EL2
ret
read_icc_sre_el3:; .type read_icc_sre_el3, %function
mrs x0, ICC_SRE_EL3
ret
write_icc_sre_el1:; .type write_icc_sre_el1, %function
msr ICC_SRE_EL1, x0
isb
ret
write_icc_sre_el2:; .type write_icc_sre_el2, %function
msr ICC_SRE_EL2, x0
isb
ret
write_icc_sre_el3:; .type write_icc_sre_el3, %function
msr ICC_SRE_EL3, x0
isb
ret
write_icc_pmr_el1:; .type write_icc_pmr_el1, %function
msr ICC_PMR_EL1, x0
isb
ret
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GIC_H__
#define __GIC_H__
#define MAX_SPIS 480
#define MAX_PPIS 14
#define MAX_SGIS 16
#define GRP0 0
#define GRP1 1
#define MAX_PRI_VAL 0xff
#define ENABLE_GRP0 (1 << 0)
#define ENABLE_GRP1 (1 << 1)
/* Distributor interface definitions */
#define GICD_CTLR 0x0
#define GICD_TYPER 0x4
#define GICD_IGROUPR 0x80
#define GICD_ISENABLER 0x100
#define GICD_ICENABLER 0x180
#define GICD_ISPENDR 0x200
#define GICD_ICPENDR 0x280
#define GICD_ISACTIVER 0x300
#define GICD_ICACTIVER 0x380
#define GICD_IPRIORITYR 0x400
#define GICD_ITARGETSR 0x800
#define GICD_ICFGR 0xC00
#define GICD_SGIR 0xF00
#define GICD_CPENDSGIR 0xF10
#define GICD_SPENDSGIR 0xF20
#define IGROUPR_SHIFT 5
#define ISENABLER_SHIFT 5
#define ICENABLER_SHIFT ISENABLER_SHIFT
#define ISPENDR_SHIFT 5
#define ICPENDR_SHIFT ISPENDR_SHIFT
#define ISACTIVER_SHIFT 5
#define ICACTIVER_SHIFT ISACTIVER_SHIFT
#define IPRIORITYR_SHIFT 2
#define ITARGETSR_SHIFT 2
#define ICFGR_SHIFT 4
#define CPENDSGIR_SHIFT 2
#define SPENDSGIR_SHIFT CPENDSGIR_SHIFT
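/*
* The *_SHIFT values above turn an interrupt ID into a register index:
* each 32-bit ISENABLER/ICENABLER/ISPENDR/etc. register covers 32 interrupts
* (shift of 5), IPRIORITYR and ITARGETSR hold four byte-wide fields per
* register (shift of 2) and ICFGR holds sixteen 2-bit fields (shift of 4).
*/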
/* GICD_TYPER bit definitions */
#define IT_LINES_NO_MASK 0x1f
/* Physical CPU Interface registers */
#define GICC_CTLR 0x0
#define GICC_PMR 0x4
#define GICC_BPR 0x8
#define GICC_IAR 0xC
#define GICC_EOIR 0x10
#define GICC_RPR 0x14
#define GICC_HPPIR 0x18
#define GICC_IIDR 0xFC
#define GICC_DIR 0x1000
#define GICC_PRIODROP GICC_EOIR
/* GICC_CTLR bit definitions */
#define EOI_MODE_NS (1 << 10)
#define EOI_MODE_S (1 << 9)
#define IRQ_BYP_DIS_GRP1 (1 << 8)
#define FIQ_BYP_DIS_GRP1 (1 << 7)
#define IRQ_BYP_DIS_GRP0 (1 << 6)
#define FIQ_BYP_DIS_GRP0 (1 << 5)
#define CBPR (1 << 4)
#define FIQ_EN (1 << 3)
#define ACK_CTL (1 << 2)
/* GICC_IIDR bit masks and shifts */
#define GICC_IIDR_PID_SHIFT 20
#define GICC_IIDR_ARCH_SHIFT 16
#define GICC_IIDR_REV_SHIFT 12
#define GICC_IIDR_IMP_SHIFT 0
#define GICC_IIDR_PID_MASK 0xfff
#define GICC_IIDR_ARCH_MASK 0xf
#define GICC_IIDR_REV_MASK 0xf
#define GICC_IIDR_IMP_MASK 0xfff
/* HYP view virtual CPU Interface registers */
#define GICH_CTL 0x0
#define GICH_VTR 0x4
#define GICH_ELRSR0 0x30
#define GICH_ELRSR1 0x34
#define GICH_APR0 0xF0
#define GICH_LR_BASE 0x100
/* Virtual CPU Interface registers */
#define GICV_CTL 0x0
#define GICV_PRIMASK 0x4
#define GICV_BP 0x8
#define GICV_INTACK 0xC
#define GICV_EOI 0x10
#define GICV_RUNNINGPRI 0x14
#define GICV_HIGHESTPEND 0x18
#define GICV_DEACTIVATE 0x1000
/* GICv3 Re-distributor interface registers & shifts */
#define GICR_PCPUBASE_SHIFT 0x11
#define GICR_WAKER 0x14
/* GICR_WAKER bit definitions */
#define WAKER_CA (1UL << 2)
#define WAKER_PS (1UL << 1)
/* GICv3 ICC_SRE register bit definitions*/
#define ICC_SRE_EN (1UL << 3)
#define ICC_SRE_SRE (1UL << 0)
#ifndef __ASSEMBLY__
/*******************************************************************************
* Function prototypes
******************************************************************************/
extern inline unsigned int gicd_read_typer(unsigned int);
extern inline unsigned int gicd_read_ctlr(unsigned int);
extern unsigned int gicd_read_igroupr(unsigned int, unsigned int);
extern unsigned int gicd_read_isenabler(unsigned int, unsigned int);
extern unsigned int gicd_read_icenabler(unsigned int, unsigned int);
extern unsigned int gicd_read_ispendr(unsigned int, unsigned int);
extern unsigned int gicd_read_icpendr(unsigned int, unsigned int);
extern unsigned int gicd_read_isactiver(unsigned int, unsigned int);
extern unsigned int gicd_read_icactiver(unsigned int, unsigned int);
extern unsigned int gicd_read_ipriorityr(unsigned int, unsigned int);
extern unsigned int gicd_read_itargetsr(unsigned int, unsigned int);
extern unsigned int gicd_read_icfgr(unsigned int, unsigned int);
extern unsigned int gicd_read_sgir(unsigned int);
extern unsigned int gicd_read_cpendsgir(unsigned int, unsigned int);
extern unsigned int gicd_read_spendsgir(unsigned int, unsigned int);
extern inline void gicd_write_ctlr(unsigned int, unsigned int);
extern void gicd_write_igroupr(unsigned int, unsigned int, unsigned int);
extern void gicd_write_isenabler(unsigned int, unsigned int, unsigned int);
extern void gicd_write_icenabler(unsigned int, unsigned int, unsigned int);
extern void gicd_write_ispendr(unsigned int, unsigned int, unsigned int);
extern void gicd_write_icpendr(unsigned int, unsigned int, unsigned int);
extern void gicd_write_isactiver(unsigned int, unsigned int, unsigned int);
extern void gicd_write_icactiver(unsigned int, unsigned int, unsigned int);
extern void gicd_write_ipriorityr(unsigned int, unsigned int, unsigned int);
extern void gicd_write_itargetsr(unsigned int, unsigned int, unsigned int);
extern void gicd_write_icfgr(unsigned int, unsigned int, unsigned int);
extern void gicd_write_sgir(unsigned int, unsigned int);
extern void gicd_write_cpendsgir(unsigned int, unsigned int, unsigned int);
extern void gicd_write_spendsgir(unsigned int, unsigned int, unsigned int);
extern unsigned int gicd_get_igroupr(unsigned int, unsigned int);
extern void gicd_set_igroupr(unsigned int, unsigned int);
extern void gicd_clr_igroupr(unsigned int, unsigned int);
extern void gicd_set_isenabler(unsigned int, unsigned int);
extern void gicd_set_icenabler(unsigned int, unsigned int);
extern void gicd_set_ispendr(unsigned int, unsigned int);
extern void gicd_set_icpendr(unsigned int, unsigned int);
extern void gicd_set_isactiver(unsigned int, unsigned int);
extern void gicd_set_icactiver(unsigned int, unsigned int);
extern void gicd_set_ipriorityr(unsigned int, unsigned int, unsigned int);
extern void gicd_set_itargetsr(unsigned int, unsigned int, unsigned int);
extern inline unsigned int gicc_read_ctlr(unsigned int);
extern inline unsigned int gicc_read_pmr(unsigned int);
extern inline unsigned int gicc_read_BPR(unsigned int);
extern inline unsigned int gicc_read_IAR(unsigned int);
extern inline unsigned int gicc_read_EOIR(unsigned int);
extern inline unsigned int gicc_read_hppir(unsigned int);
extern inline unsigned int gicc_read_iidr(unsigned int);
extern inline unsigned int gicc_read_dir(unsigned int);
extern inline void gicc_write_ctlr(unsigned int, unsigned int);
extern inline void gicc_write_pmr(unsigned int, unsigned int);
extern inline void gicc_write_BPR(unsigned int, unsigned int);
extern inline void gicc_write_IAR(unsigned int, unsigned int);
extern inline void gicc_write_EOIR(unsigned int, unsigned int);
extern inline void gicc_write_hppir(unsigned int, unsigned int);
extern inline void gicc_write_dir(unsigned int, unsigned int);
/* GICv3 functions */
extern inline unsigned int gicr_read_waker(unsigned int);
extern inline void gicr_write_waker(unsigned int, unsigned int);
extern unsigned int read_icc_sre_el1(void);
extern unsigned int read_icc_sre_el2(void);
extern unsigned int read_icc_sre_el3(void);
extern void write_icc_sre_el1(unsigned int);
extern void write_icc_sre_el2(unsigned int);
extern void write_icc_sre_el3(unsigned int);
extern void write_icc_pmr_el1(unsigned int);
#endif /*__ASSEMBLY__*/
#endif /* __GIC_H__ */
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <gic.h>
#include <mmio.h>
/*******************************************************************************
* GIC Distributor interface accessors for reading entire registers
******************************************************************************/
inline unsigned int gicd_read_ctlr(unsigned int base)
{
return mmio_read_32(base + GICD_CTLR);
}
inline unsigned int gicd_read_typer(unsigned int base)
{
return mmio_read_32(base + GICD_TYPER);
}
unsigned int gicd_read_igroupr(unsigned int base, unsigned int id)
{
unsigned n = id >> IGROUPR_SHIFT;
return mmio_read_32(base + GICD_IGROUPR + (n << 2));
}
unsigned int gicd_read_isenabler(unsigned int base, unsigned int id)
{
unsigned n = id >> ISENABLER_SHIFT;
return mmio_read_32(base + GICD_ISENABLER + (n << 2));
}
unsigned int gicd_read_icenabler(unsigned int base, unsigned int id)
{
unsigned n = id >> ICENABLER_SHIFT;
return mmio_read_32(base + GICD_ICENABLER + (n << 2));
}
unsigned int gicd_read_ispendr(unsigned int base, unsigned int id)
{
unsigned n = id >> ISPENDR_SHIFT;
return mmio_read_32(base + GICD_ISPENDR + (n << 2));
}
unsigned int gicd_read_icpendr(unsigned int base, unsigned int id)
{
unsigned n = id >> ICPENDR_SHIFT;
return mmio_read_32(base + GICD_ICPENDR + (n << 2));
}
unsigned int gicd_read_isactiver(unsigned int base, unsigned int id)
{
unsigned n = id >> ISACTIVER_SHIFT;
return mmio_read_32(base + GICD_ISACTIVER + (n << 2));
}
unsigned int gicd_read_icactiver(unsigned int base, unsigned int id)
{
unsigned n = id >> ICACTIVER_SHIFT;
return mmio_read_32(base + GICD_ICACTIVER + (n << 2));
}
unsigned int gicd_read_ipriorityr(unsigned int base, unsigned int id)
{
unsigned n = id >> IPRIORITYR_SHIFT;
return mmio_read_32(base + GICD_IPRIORITYR + (n << 2));
}
unsigned int gicd_read_itargetsr(unsigned int base, unsigned int id)
{
unsigned n = id >> ITARGETSR_SHIFT;
return mmio_read_32(base + GICD_ITARGETSR + (n << 2));
}
unsigned int gicd_read_icfgr(unsigned int base, unsigned int id)
{
unsigned n = id >> ICFGR_SHIFT;
return mmio_read_32(base + GICD_ICFGR + (n << 2));
}
unsigned int gicd_read_sgir(unsigned int base)
{
return mmio_read_32(base + GICD_SGIR);
}
unsigned int gicd_read_cpendsgir(unsigned int base, unsigned int id)
{
unsigned n = id >> CPENDSGIR_SHIFT;
return mmio_read_32(base + GICD_CPENDSGIR + (n << 2));
}
unsigned int gicd_read_spendsgir(unsigned int base, unsigned int id)
{
unsigned n = id >> SPENDSGIR_SHIFT;
return mmio_read_32(base + GICD_SPENDSGIR + (n << 2));
}
/*******************************************************************************
* GIC Distributor interface accessors for writing entire registers
******************************************************************************/
inline void gicd_write_ctlr(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICD_CTLR, val);
return;
}
void gicd_write_igroupr(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> IGROUPR_SHIFT;
mmio_write_32(base + GICD_IGROUPR + (n << 2), val);
return;
}
void gicd_write_isenabler(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISENABLER_SHIFT;
mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
return;
}
void gicd_write_icenabler(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICENABLER_SHIFT;
mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
return;
}
void gicd_write_ispendr(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISPENDR_SHIFT;
mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
return;
}
void gicd_write_icpendr(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICPENDR_SHIFT;
mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
return;
}
void gicd_write_isactiver(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISACTIVER_SHIFT;
mmio_write_32(base + GICD_ISACTIVER + (n << 2), val);
return;
}
void gicd_write_icactiver(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICACTIVER_SHIFT;
mmio_write_32(base + GICD_ICACTIVER + (n << 2), val);
return;
}
void gicd_write_ipriorityr(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> IPRIORITYR_SHIFT;
mmio_write_32(base + GICD_IPRIORITYR + (n << 2), val);
return;
}
void gicd_write_itargetsr(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ITARGETSR_SHIFT;
mmio_write_32(base + GICD_ITARGETSR + (n << 2), val);
return;
}
void gicd_write_icfgr(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICFGR_SHIFT;
mmio_write_32(base + GICD_ICFGR + (n << 2), val);
return;
}
void gicd_write_sgir(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICD_SGIR, val);
return;
}
void gicd_write_cpendsgir(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> CPENDSGIR_SHIFT;
mmio_write_32(base + GICD_CPENDSGIR + (n << 2), val);
return;
}
void gicd_write_spendsgir(unsigned int base, unsigned int id, unsigned int val)
{
unsigned n = id >> SPENDSGIR_SHIFT;
mmio_write_32(base + GICD_SPENDSGIR + (n << 2), val);
return;
}
/*******************************************************************************
* GIC Distributor interface accessors for individual interrupt manipulation
******************************************************************************/
unsigned int gicd_get_igroupr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
return (reg_val >> bit_num) & 0x1;
}
void gicd_set_igroupr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
gicd_write_igroupr(base, id, reg_val | (1 << bit_num));
return;
}
void gicd_clr_igroupr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
gicd_write_igroupr(base, id, reg_val & ~(1 << bit_num));
return;
}
void gicd_set_isenabler(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
unsigned int reg_val = gicd_read_isenabler(base, id);
gicd_write_isenabler(base, id, reg_val | (1 << bit_num));
return;
}
void gicd_set_icenabler(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
/* ICENABLER bits are write-one-to-clear, so write only the bit to disable */
gicd_write_icenabler(base, id, (1 << bit_num));
return;
}
void gicd_set_ispendr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
unsigned int reg_val = gicd_read_ispendr(base, id);
gicd_write_ispendr(base, id, reg_val | (1 << bit_num));
return;
}
void gicd_set_icpendr(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
/* ICPENDR bits are write-one-to-clear, so write only the bit to clear */
gicd_write_icpendr(base, id, (1 << bit_num));
return;
}
void gicd_set_isactiver(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
unsigned int reg_val = gicd_read_isactiver(base, id);
gicd_write_isactiver(base, id, reg_val | (1 << bit_num));
return;
}
void gicd_set_icactiver(unsigned int base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICACTIVER_SHIFT) - 1);
/* ICACTIVER bits are write-one-to-deactivate, so write only the bit to clear */
gicd_write_icactiver(base, id, (1 << bit_num));
return;
}
/*
* Make sure that the interrupt's group is set before expecting
* this function to do its job correctly.
*/
void gicd_set_ipriorityr(unsigned int base, unsigned int id, unsigned int pri)
{
unsigned byte_off = id & ((1 << IPRIORITYR_SHIFT) - 1);
unsigned int reg_val = gicd_read_ipriorityr(base, id);
/*
* Enforce ARM recommendation to manage priority values such
* that group1 interrupts always have a lower priority than
* group0 interrupts
*/
if (gicd_get_igroupr(base, id) == GRP1)
pri |= 1 << 7;
else
pri &= ~(1 << 7);
/* Clear the current priority byte before inserting the new value */
reg_val &= ~((unsigned int) MAX_PRI_VAL << (byte_off << 3));
reg_val |= (pri & MAX_PRI_VAL) << (byte_off << 3);
gicd_write_ipriorityr(base, id, reg_val);
return;
}
void gicd_set_itargetsr(unsigned int base, unsigned int id, unsigned int iface)
{
unsigned byte_off = id & ((1 << ITARGETSR_SHIFT) - 1);
unsigned int reg_val = gicd_read_itargetsr(base, id);
gicd_write_itargetsr(base, id, reg_val |
(1 << iface) << (byte_off << 3));
return;
}
/*******************************************************************************
* GIC CPU interface accessors for reading entire registers
******************************************************************************/
inline unsigned int gicc_read_ctlr(unsigned int base)
{
return mmio_read_32(base + GICC_CTLR);
}
inline unsigned int gicc_read_pmr(unsigned int base)
{
return mmio_read_32(base + GICC_PMR);
}
inline unsigned int gicc_read_BPR(unsigned int base)
{
return mmio_read_32(base + GICC_BPR);
}
inline unsigned int gicc_read_IAR(unsigned int base)
{
return mmio_read_32(base + GICC_IAR);
}
inline unsigned int gicc_read_EOIR(unsigned int base)
{
return mmio_read_32(base + GICC_EOIR);
}
inline unsigned int gicc_read_hppir(unsigned int base)
{
return mmio_read_32(base + GICC_HPPIR);
}
inline unsigned int gicc_read_dir(unsigned int base)
{
return mmio_read_32(base + GICC_DIR);
}
inline unsigned int gicc_read_iidr(unsigned int base)
{
return mmio_read_32(base + GICC_IIDR);
}
/*******************************************************************************
* GIC CPU interface accessors for writing entire registers
******************************************************************************/
inline void gicc_write_ctlr(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_CTLR, val);
return;
}
inline void gicc_write_pmr(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_PMR, val);
return;
}
inline void gicc_write_BPR(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_BPR, val);
return;
}
inline void gicc_write_IAR(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_IAR, val);
return;
}
inline void gicc_write_EOIR(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_EOIR, val);
return;
}
inline void gicc_write_hppir(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_HPPIR, val);
return;
}
inline void gicc_write_dir(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICC_DIR, val);
return;
}
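/*
 * Illustrative sketch only (not part of the driver): one way a platform
 * could use the single-interrupt helpers above to route a secure SPI to
 * Group0 on CPU interface 0 and enable it. The base address and interrupt
 * ID are whatever the platform defines; the names here are hypothetical.
 */
static void __attribute__((unused)) example_configure_secure_spi(unsigned int gicd_base,
						unsigned int id)
{
	gicd_clr_igroupr(gicd_base, id);	/* Group0, i.e. secure */
	gicd_set_ipriorityr(gicd_base, id, 0);	/* highest priority */
	gicd_set_itargetsr(gicd_base, id, 0);	/* route to CPU interface 0 */
	gicd_set_isenabler(gicd_base, id);	/* enable forwarding */
}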
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <gic.h>
#include <mmio.h>
/*******************************************************************************
* GIC Redistributor interface accessors
******************************************************************************/
inline unsigned int gicr_read_waker(unsigned int base)
{
return mmio_read_32(base + GICR_WAKER);
}
inline void gicr_write_waker(unsigned int base, unsigned int val)
{
mmio_write_32(base + GICR_WAKER, val);
return;
}
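/*
 * Illustrative sketch only (not part of the driver): the usual GICv3 sequence
 * for marking a Redistributor as awake is to clear GICR_WAKER.ProcessorSleep
 * and then poll until ChildrenAsleep reads as 0. The per-cpu Redistributor
 * frame address is platform specific and is just a parameter here.
 */
static void __attribute__((unused)) example_gicr_mark_awake(unsigned int gicr_base)
{
	gicr_write_waker(gicr_base, gicr_read_waker(gicr_base) & ~WAKER_PS);
	while (gicr_read_waker(gicr_base) & WAKER_CA)
		;
}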
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <platform.h>
#include <assert.h>
/*******************************************************************************
* Function that does the first bit of architectural setup that affects
* execution in the non-secure address space.
******************************************************************************/
void bl1_arch_setup(void)
{
unsigned long tmp_reg = 0;
unsigned int counter_base_frequency;
/* Enable alignment checks and set the exception endianness to LE */
tmp_reg = read_sctlr();
tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
tmp_reg &= ~SCTLR_EE_BIT;
write_sctlr(tmp_reg);
/*
* Enable HVCs, route FIQs to EL3, set the next EL to be aarch64
*/
tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_FIQ_BIT;
write_scr(tmp_reg);
/* Do not trap coprocessor accesses from lower ELs to EL3 */
write_cptr_el3(0);
/* Read the frequency from Frequency modes table */
counter_base_frequency = mmio_read_32(SYS_CNTCTL_BASE + CNTFID_OFF);
/* The first entry of the frequency modes table must not be 0 */
assert(counter_base_frequency != 0);
/* Program the counter frequency */
write_cntfrq_el0(counter_base_frequency);
return;
}
/*******************************************************************************
* Set the Secure EL1 required architectural state
******************************************************************************/
void bl1_arch_next_el_setup(void) {
unsigned long current_sctlr, next_sctlr;
/* Use the same endianness as the current BL */
current_sctlr = read_sctlr();
next_sctlr = (current_sctlr & SCTLR_EE_BIT);
/* Set SCTLR Secure EL1 */
next_sctlr |= SCTLR_EL1_RES1;
write_sctlr_el1(next_sctlr);
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
.globl reset_handler
.section reset_code, "ax"; .align 3
/* -----------------------------------------------------
* reset_handler() is the entry point into the trusted
* firmware code when a cpu is released from warm or
* cold reset.
* -----------------------------------------------------
*/
reset_handler:; .type reset_handler, %function
/* ---------------------------------------------
* Perform any processor specific actions upon
* reset e.g. cache, tlb invalidations etc.
* ---------------------------------------------
*/
bl cpu_reset_handler
_wait_for_entrypoint:
/* ---------------------------------------------
* Find the type of reset and jump to handler
* if present. If the handler is null then it is
* a cold boot. The primary cpu will set up the
* platform while the secondaries wait for
* their turn to be woken up
* ---------------------------------------------
*/
bl read_mpidr
bl platform_get_entrypoint
cbnz x0, _do_warm_boot
bl read_mpidr
bl platform_is_primary_cpu
cbnz x0, _do_cold_boot
/* ---------------------------------------------
* Perform any platform specific secondary cpu
* actions
* ---------------------------------------------
*/
bl plat_secondary_cold_boot_setup
b _wait_for_entrypoint
_do_cold_boot:
/* ---------------------------------------------
* Initialize platform and jump to our c-entry
* point for this type of reset
* ---------------------------------------------
*/
adr x0, bl1_main
bl platform_cold_boot_init
b _panic
_do_warm_boot:
/* ---------------------------------------------
* Jump to BL31 for all warm boot init.
* ---------------------------------------------
*/
blr x0
_panic:
b _panic
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <bl_common.h>
#include <bl1.h>
#include <platform.h>
#include <runtime_svc.h>
.globl early_exceptions
.section .text, "ax"; .align 11
/* -----------------------------------------------------
* Very simple exception handlers used by BL1 and BL2.
* Apart from one SMC exception all other traps loop
* endlessly.
* -----------------------------------------------------
*/
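/* -----------------------------------------------------
 * Note: an AArch64 vector table must be 2KB aligned
 * (hence the .align 11 above) and each entry is 128
 * bytes long, which is what the .align 7 directives
 * below provide.
 * -----------------------------------------------------
 */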
.align 7
early_exceptions:
/* -----------------------------------------------------
* Current EL with SP0 : 0x0 - 0x180
* -----------------------------------------------------
*/
SynchronousExceptionSP0:
mov x0, #SYNC_EXCEPTION_SP_EL0
bl plat_report_exception
b SynchronousExceptionSP0
.align 7
IrqSP0:
mov x0, #IRQ_SP_EL0
bl plat_report_exception
b IrqSP0
.align 7
FiqSP0:
mov x0, #FIQ_SP_EL0
bl plat_report_exception
b FiqSP0
.align 7
SErrorSP0:
mov x0, #SERROR_SP_EL0
bl plat_report_exception
b SErrorSP0
/* -----------------------------------------------------
* Current EL with SPx: 0x200 - 0x380
* -----------------------------------------------------
*/
.align 7
SynchronousExceptionSPx:
mov x0, #SYNC_EXCEPTION_SP_ELX
bl plat_report_exception
b SynchronousExceptionSPx
.align 7
IrqSPx:
mov x0, #IRQ_SP_ELX
bl plat_report_exception
b IrqSPx
.align 7
FiqSPx:
mov x0, #FIQ_SP_ELX
bl plat_report_exception
b FiqSPx
.align 7
SErrorSPx:
mov x0, #SERROR_SP_ELX
bl plat_report_exception
b SErrorSPx
/* -----------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x580
* -----------------------------------------------------
*/
.align 7
SynchronousExceptionA64:
/* ---------------------------------------------
* Only a single SMC exception from BL2 to ask
* BL1 to pass EL3 control to BL31 is expected
* here.
* ---------------------------------------------
*/
sub sp, sp, #0x40
stp x0, x1, [sp, #0x0]
stp x2, x3, [sp, #0x10]
stp x4, x5, [sp, #0x20]
stp x6, x7, [sp, #0x30]
mov x19, x0
mov x20, x1
mov x21, x2
mov x0, #SYNC_EXCEPTION_AARCH64
bl plat_report_exception
bl read_esr
ubfx x1, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x1, #EC_AARCH64_SMC
b.ne panic
mov x1, #RUN_IMAGE
cmp x19, x1
b.ne panic
mov x0, x20
mov x1, x21
mov x2, x3
mov x3, x4
bl display_boot_progress
mov x0, x20
bl write_elr
mov x0, x21
bl write_spsr
ubfx x0, x21, #MODE_EL_SHIFT, #2
cmp x0, #MODE_EL3
b.ne skip_mmu_teardown
/* ---------------------------------------------
* If BL31 is to be executed in EL3 as well
* then turn off the MMU so that it can perform
* its own setup. TODO: Assuming flat mapped
* translations here. Also all should go into a
* separate MMU teardown function
* ---------------------------------------------
*/
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
bl read_sctlr
bic x0, x0, x1
bl write_sctlr
mov x0, #DCCISW
bl dcsw_op_all
bl tlbialle3
skip_mmu_teardown:
ldp x6, x7, [sp, #0x30]
ldp x4, x5, [sp, #0x20]
ldp x2, x3, [sp, #0x10]
ldp x0, x1, [sp, #0x0]
add sp, sp, #0x40
eret
panic:
b panic
.align 7
IrqA64:
mov x0, #IRQ_AARCH64
bl plat_report_exception
b IrqA64
.align 7
FiqA64:
mov x0, #FIQ_AARCH64
bl plat_report_exception
b FiqA64
.align 7
SErrorA64:
mov x0, #SERROR_AARCH64
bl plat_report_exception
b SErrorA64
/* -----------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x780
* -----------------------------------------------------
*/
.align 7
SynchronousExceptionA32:
mov x0, #SYNC_EXCEPTION_AARCH32
bl plat_report_exception
b SynchronousExceptionA32
.align 7
IrqA32:
mov x0, #IRQ_AARCH32
bl plat_report_exception
b IrqA32
.align 7
FiqA32:
mov x0, #FIQ_AARCH32
bl plat_report_exception
b FiqA32
.align 7
SErrorA32:
mov x0, #SERROR_AARCH32
bl plat_report_exception
b SErrorA32
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
MEMORY {
/* ROM is read-only and executable */
ROM (rx): ORIGIN = TZROM_BASE, LENGTH = TZROM_SIZE
/* RAM is read/write and Initialised */
RAM (rwx): ORIGIN = TZRAM_BASE, LENGTH = TZRAM_SIZE
}
SECTIONS
{
FIRMWARE_ROM : {
*(reset_code)
*(.text)
*(.rodata)
} >ROM
.bss : {
__BSS_RAM_START__ = .;
*(.bss)
*(COMMON)
__BSS_RAM_STOP__ = .;
} >RAM AT>ROM
.data : {
__DATA_RAM_START__ = .;
*(.data)
__DATA_RAM_STOP__ = .;
} >RAM AT>ROM
FIRMWARE_RAM_STACKS ALIGN (PLATFORM_CACHE_LINE_SIZE) : {
. += 0x1000;
*(tzfw_normal_stacks)
. = ALIGN(4096);
} >RAM AT>ROM
FIRMWARE_RAM_COHERENT ALIGN (4096): {
*(tzfw_coherent_mem)
/* . += 0x1000;*/
/* Do we need to make sure this is at least 4k? */
. = ALIGN(4096);
} >RAM
__FIRMWARE_ROM_START__ = LOADADDR(FIRMWARE_ROM);
__FIRMWARE_ROM_SIZE__ = SIZEOF(FIRMWARE_ROM);
__FIRMWARE_DATA_START__ = LOADADDR(.data);
__FIRMWARE_DATA_SIZE__ = SIZEOF(.data);
__FIRMWARE_BSS_START__ = LOADADDR(.bss);
__FIRMWARE_BSS_SIZE__ = SIZEOF(.bss);
__FIRMWARE_RAM_STACKS_START__ = LOADADDR(FIRMWARE_RAM_STACKS);
__FIRMWARE_RAM_STACKS_SIZE__ = SIZEOF(FIRMWARE_RAM_STACKS);
__FIRMWARE_RAM_COHERENT_START__ = LOADADDR(FIRMWARE_RAM_COHERENT);
__FIRMWARE_RAM_COHERENT_SIZE__ = SIZEOF(FIRMWARE_RAM_COHERENT);
}
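/*
 * Because the .data and .bss output sections are placed ">RAM AT>ROM", their
 * load and execution addresses differ; the __FIRMWARE_*_START__/__SIZE__
 * symbols exported above presumably give the early boot code what it needs
 * to copy .data into RAM and zero .bss before C code runs.
 */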
#
# Copyright (c) 2013, ARM Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
vpath %.c drivers/arm/interconnect/cci-400/ plat/fvp \
plat/fvp/${ARCH} drivers/arm/peripherals/pl011 common/ lib/ \
lib/semihosting arch/aarch64/ lib/non-semihosting
vpath %.S arch/${ARCH}/cpu plat/common/aarch64 \
plat/fvp/${ARCH} lib/semihosting/aarch64 \
include/ lib/arch/aarch64
BL1_ASM_OBJS := bl1_entrypoint.o bl1_plat_helpers.o cpu_helpers.o
BL1_C_OBJS := bl1_main.o cci400.o bl1_plat_setup.o bl1_arch_setup.o \
fvp_common.o fvp_helpers.o early_exceptions.o
BL1_ENTRY_POINT := reset_handler
BL1_MAPFILE := bl1.map
BL1_LINKERFILE := bl1.ld
BL1_OBJS := $(BL1_C_OBJS) $(BL1_ASM_OBJS)
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <semihosting.h>
#include <bl1.h>
void bl1_arch_next_el_setup(void);
/*******************************************************************************
* Function to perform late architectural and platform specific initialization.
* It also locates and loads the BL2 raw binary image in the trusted DRAM. Only
* called by the primary cpu after a cold boot.
* TODO: Add support for alternative image load mechanisms, e.g. using virtio/elf
* loader etc.
******************************************************************************/
void bl1_main(void)
{
unsigned long sctlr_el3 = read_sctlr();
unsigned long bl2_base;
unsigned int load_type = TOP_LOAD, spsr;
meminfo bl1_tzram_layout, *bl2_tzram_layout = 0x0;
/*
* Ensure that MMU/Caches and coherency are turned on
*/
assert(sctlr_el3 & SCTLR_M_BIT);
assert(sctlr_el3 & SCTLR_C_BIT);
assert(sctlr_el3 & SCTLR_I_BIT);
/* Perform remaining generic architectural setup from EL3 */
bl1_arch_setup();
/* Perform platform setup in BL1. */
bl1_platform_setup();
/* Announce our arrival */
printf(FIRMWARE_WELCOME_STR);
printf("Built : %s, %s\n\r", __TIME__, __DATE__);
/*
* Find out how much free trusted ram remains after BL1 load
* & load the BL2 image at its top
*/
bl1_tzram_layout = bl1_get_sec_mem_layout();
bl2_base = load_image(&bl1_tzram_layout,
(const char *) BL2_IMAGE_NAME,
load_type, BL2_BASE);
/*
* Create a new layout of memory for BL2 as seen by BL1 i.e.
* tell it the amount of total and free memory available.
* This layout is created at the first free address visible
* to BL2. BL2 will read the memory layout before using its
* memory for other purposes.
*/
bl2_tzram_layout = (meminfo *) bl1_tzram_layout.free_base;
init_bl2_mem_layout(&bl1_tzram_layout,
bl2_tzram_layout,
load_type,
bl2_base);
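/*
 * Note: bl2_tzram_layout is also passed to run_image() below and presumably
 * arrives in BL2 as one of the register arguments that bl2_entrypoint saves
 * on entry before handing them to bl2_early_platform_setup.
 */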
if (bl2_base) {
bl1_arch_next_el_setup();
spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
printf("Booting trusted firmware boot loader stage 2\n\r");
#if DEBUG
printf("BL2 address = 0x%llx \n\r", (unsigned long long) bl2_base);
printf("BL2 cpsr = 0x%x \n\r", spsr);
printf("BL2 memory layout address = 0x%llx \n\r",
(unsigned long long) bl2_tzram_layout);
#endif
run_image(bl2_base, spsr, SECURE, bl2_tzram_layout, 0);
}
/*
* TODO: print failure to load BL2 but also add a tzwdog timer
* which will reset the system eventually.
*/
printf("Failed to load boot loader stage 2 (BL2) firmware.\n\r");
return;
}
/*******************************************************************************
* Temporary function to print the fact that BL2 has done its job and BL31 is
* about to be loaded. This is needed as long as printfs cannot be used
******************************************************************************/
void display_boot_progress(unsigned long entrypoint,
unsigned long spsr,
unsigned long mem_layout,
unsigned long ns_image_info)
{
printf("Booting trusted firmware boot loader stage 3\n\r");
#if DEBUG
printf("BL31 address = 0x%llx \n\r", (unsigned long long) entrypoint);
printf("BL31 cpsr = 0x%llx \n\r", (unsigned long long)spsr);
printf("BL31 memory layout address = 0x%llx \n\r", (unsigned long long)mem_layout);
printf("BL31 non-trusted image info address = 0x%llx\n\r", (unsigned long long)ns_image_info);
#endif
return;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <platform.h>
/*******************************************************************************
* Placeholder function to perform any S-EL1 specific architectural setup. At
* the moment it only gives S-EL1 access to the FP/SIMD registers.
******************************************************************************/
void bl2_arch_setup(void)
{
/* Give access to FP/SIMD registers */
write_cpacr(CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl_common.h>
.globl bl2_entrypoint
.section entry_code, "ax"; .align 3
bl2_entrypoint:; .type bl2_entrypoint, %function
/*---------------------------------------------
* Store the extents of the tzram available to
* BL2 for future use. Use the opcode param to
* allow implementing other functions if needed.
* ---------------------------------------------
*/
mov x20, x0
mov x21, x1
mov x22, x2
/* ---------------------------------------------
* This is BL2 which is expected to be executed
* only by the primary cpu (at least for now).
* So, make sure no secondary has lost its way.
* ---------------------------------------------
*/
bl read_mpidr
mov x19, x0
bl platform_is_primary_cpu
cbz x0, _panic
/* --------------------------------------------
* Give ourselves a small coherent stack to
* ease the pain of initializing the MMU
* --------------------------------------------
*/
mov x0, x19
bl platform_set_coherent_stack
/* ---------------------------------------------
* Perform early platform setup & platform
* specific early arch. setup e.g. mmu setup
* ---------------------------------------------
*/
mov x0, x21
mov x1, x22
bl bl2_early_platform_setup
bl bl2_plat_arch_setup
/* ---------------------------------------------
* Give ourselves a stack allocated in Normal
* -IS-WBWA memory
* ---------------------------------------------
*/
mov x0, x19
bl platform_set_stack
/* ---------------------------------------------
* Jump to main function.
* ---------------------------------------------
*/
bl bl2_main
_panic:
b _panic
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
MEMORY {
/* RAM is read/write and initialised */
RAM (rwx): ORIGIN = TZRAM_BASE, LENGTH = TZRAM_SIZE
}
SECTIONS
{
. = BL2_BASE;
BL2_RO NEXT (4096): {
*(entry_code)
*(.text .rodata)
} >RAM
BL2_STACKS NEXT (4096): {
*(tzfw_normal_stacks)
} >RAM
BL2_COHERENT_RAM NEXT (4096): {
*(tzfw_coherent_mem)
/* . += 0x1000;*/
/* Do we need to ensure at least 4k here? */
. = NEXT(4096);
} >RAM
__BL2_DATA_START__ = .;
.bss NEXT (4096): {
*(SORT_BY_ALIGNMENT(.bss))
*(COMMON)
} >RAM
.data : {
*(.data)
} >RAM
__BL2_DATA_STOP__ = .;
__BL2_RO_BASE__ = LOADADDR(BL2_RO);
__BL2_RO_SIZE__ = SIZEOF(BL2_RO);
__BL2_STACKS_BASE__ = LOADADDR(BL2_STACKS);
__BL2_STACKS_SIZE__ = SIZEOF(BL2_STACKS);
__BL2_COHERENT_RAM_BASE__ = LOADADDR(BL2_COHERENT_RAM);
__BL2_COHERENT_RAM_SIZE__ = SIZEOF(BL2_COHERENT_RAM);
__BL2_RW_BASE__ = __BL2_DATA_START__;
__BL2_RW_SIZE__ = __BL2_DATA_STOP__ - __BL2_DATA_START__;
}
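/*
 * Note: the __BL2_*__ definitions above are linker symbols. C code would
 * typically import them and use their addresses as the values, e.g.
 *
 *     extern unsigned long __BL2_RO_BASE__;
 *     #define BL2_RO_BASE ((unsigned long)(&__BL2_RO_BASE__))
 *
 * The actual consumers are expected to be in platform code outside this file.
 */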
#
# Copyright (c) 2013, ARM Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
vpath %.c common/ drivers/arm/interconnect/cci-400/ \
drivers/arm/peripherals/pl011 common/ lib/ \
plat/fvp plat/fvp/${ARCH} lib/semihosting arch/aarch64/ \
lib/non-semihosting
vpath %.S lib/arch/aarch64 \
lib/semihosting/aarch64 \
include lib/sync/locks/exclusive
BL2_ASM_OBJS := bl2_entrypoint.o spinlock.o
BL2_C_OBJS := bl2_main.o bl2_plat_setup.o bl2_arch_setup.o fvp_common.o \
early_exceptions.o
BL2_ENTRY_POINT := bl2_entrypoint
BL2_MAPFILE := bl2.map
BL2_LINKERFILE := bl2.ld
BL2_OBJS := $(BL2_C_OBJS) $(BL2_ASM_OBJS)
CFLAGS += $(DEFINES)
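#
# Note: this fragment only names the BL2 objects, entry point and linker
# inputs; it is included by the top-level Makefile, which supplies the
# toolchain and common flags. A typical build (the toolchain prefix is an
# example, not mandated by this file) would be:
#
#     make CROSS_COMPILE=aarch64-none-elf- all
#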
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <semihosting.h>
#include <bl_common.h>
#include <bl2.h>
/*******************************************************************************
* The only thing to do in BL2 is to load further images and pass control to
* BL31. The memory occupied by BL2 will be reclaimed by BL3_x stages. BL2 runs
 * entirely in S-EL1. Since the ARM standard C libraries are not PIC, printf
 * et al are not available. We rely on assertions to signal error conditions.
******************************************************************************/
void bl2_main(void)
{
meminfo bl2_tzram_layout, *bl31_tzram_layout;
el_change_info *ns_image_info;
unsigned long bl31_base, el_status;
unsigned int bl2_load, bl31_load, mode;
/* Perform remaining generic architectural setup in S-EL1 */
bl2_arch_setup();
/* Perform platform setup in BL2 */
bl2_platform_setup();
#if defined (__GNUC__)
printf("BL2 Built : %s, %s\n\r", __TIME__, __DATE__);
#endif
/* Find out how much free trusted ram remains after BL2 load */
bl2_tzram_layout = bl2_get_sec_mem_layout();
/*
* Load BL31. BL1 tells BL2 whether it has been TOP or BOTTOM loaded.
* To avoid fragmentation of trusted SRAM memory, BL31 is always
* loaded opposite to BL2. This allows BL31 to reclaim BL2 memory
* while maintaining its free space in one contiguous chunk.
*/
bl2_load = bl2_tzram_layout.attr & LOAD_MASK;
assert((bl2_load == TOP_LOAD) || (bl2_load == BOT_LOAD));
bl31_load = (bl2_load == TOP_LOAD) ? BOT_LOAD : TOP_LOAD;
bl31_base = load_image(&bl2_tzram_layout, BL31_IMAGE_NAME,
bl31_load, BL31_BASE);
/* Assert if it has not been possible to load BL31 */
assert(bl31_base != 0);
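/*
 * Illustrative layout (simplified; any memory reserved by BL1 is ignored):
 * if BL2 was TOP_LOADed into trusted SRAM then BL31 is BOT_LOADed, and
 * vice versa, e.g.
 *
 *   high addr  +--------------+
 *              |     BL2      |  <- reclaimed by the BL3_x stages later
 *              +--------------+
 *              |  free SRAM   |
 *              +--------------+
 *              |     BL31     |  <- loaded at the opposite end
 *   low addr   +--------------+
 */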
/*
* Create a new layout of memory for BL31 as seen by BL2. This
* will gobble up all the BL2 memory.
*/
bl31_tzram_layout = (meminfo *) get_el_change_mem_ptr();
init_bl31_mem_layout(&bl2_tzram_layout, bl31_tzram_layout, bl31_load);
/*
* BL2 also needs to tell BL31 where the non-trusted software image
* has been loaded. Place this info right after the BL31 memory layout
*/
ns_image_info = (el_change_info *) ((unsigned char *) bl31_tzram_layout
+ sizeof(meminfo));
/*
* Assume that the non-secure bootloader has already been
* loaded to its platform-specific location.
*/
ns_image_info->entrypoint = plat_get_ns_image_entrypoint();
/* Figure out what mode we enter the non-secure world in */
el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
el_status &= ID_AA64PFR0_ELX_MASK;
if (el_status)
mode = MODE_EL2;
else
mode = MODE_EL1;
ns_image_info->spsr = make_spsr(mode, MODE_SP_ELX, MODE_RW_64);
ns_image_info->security_state = NON_SECURE;
flush_dcache_range((unsigned long) ns_image_info,
sizeof(el_change_info));
/*
* Run BL31 via an SMC to BL1. Information on how to pass control to
* the non-trusted software image will be passed to BL31 in x2.
*/
if (bl31_base)
run_image(bl31_base,
make_spsr(MODE_EL3, MODE_SP_ELX, MODE_RW_64),
SECURE,
bl31_tzram_layout,
(void *) ns_image_info);
/* There is no valid reason for run_image() to return */
assert(0);
}
/*******************************************************************************
 * BL1 uses this function to report that BL2 has done its job and BL31 is
 * about to be loaded. Since BL2 re-uses BL1's exception table, it needs to
* define this function as well.
* TODO: Remove this function from BL2.
******************************************************************************/
void display_boot_progress(unsigned long entrypoint,
unsigned long spsr,
unsigned long mem_layout,
unsigned long ns_image_info)
{
return;
}
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <platform.h>
#include <assert.h>
/*******************************************************************************
* This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also
 * override any EL3 setup done by BL1 as this code resides in rw memory.
******************************************************************************/
void bl31_arch_setup(void)
{
unsigned long tmp_reg = 0;
unsigned int counter_base_frequency;
/* Enable alignment checks and set the exception endianness to LE */
tmp_reg = read_sctlr();
tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
tmp_reg &= ~SCTLR_EE_BIT;
write_sctlr(tmp_reg);
/*
* Enable HVCs, allow NS to mask CPSR.A, route FIQs to EL3, set the
* next EL to be aarch64
*/
tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_FIQ_BIT;
write_scr(tmp_reg);
/* Do not trap coprocessor accesses from lower ELs to EL3 */
write_cptr_el3(0);
/* Read the frequency from Frequency modes table */
counter_base_frequency = mmio_read_32(SYS_CNTCTL_BASE + CNTFID_OFF);
/* The first entry of the frequency modes table must not be 0 */
assert(counter_base_frequency != 0);
/* Program the counter frequency */
write_cntfrq_el0(counter_base_frequency);
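/*
 * Note: CNTFRQ_EL0 is writable only from the highest implemented exception
 * level, which is presumably why the frequency is programmed here in EL3
 * rather than left to later stages.
 */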
return;
}
/*******************************************************************************
 * Detect the next Non-Secure EL and set up the required architectural
 * state.
******************************************************************************/
void bl31_arch_next_el_setup(void) {
unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1();
unsigned long current_sctlr, next_sctlr;
unsigned long el_status;
unsigned long scr = read_scr();
/* Use the same endianness as the current BL */
current_sctlr = read_sctlr();
next_sctlr = (current_sctlr & SCTLR_EE_BIT);
/* Find out which EL we are going to */
el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK;
/* Check if EL2 is supported */
if (el_status && (scr & SCR_HCE_BIT)) {
/* Set SCTLR EL2 */
next_sctlr |= SCTLR_EL2_RES1;
write_sctlr_el2(next_sctlr);
} else {
/* Set SCTLR Non-Secure EL1 */
next_sctlr |= SCTLR_EL1_RES1;
write_sctlr_el1(next_sctlr);
}
}
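/*
 * Note: assuming SCTLR_EL1_RES1/SCTLR_EL2_RES1 cover only the architectural
 * RES1 bits, the MMU and caches of the next EL are left disabled here and
 * must be enabled by that EL's own software.
 */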
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl1.h>
#include <bl_common.h>
#include <platform.h>
.globl bl31_entrypoint
.section entry_code, "ax"; .align 3
/* -----------------------------------------------------
* bl31_entrypoint() is the cold boot entrypoint,
* executed only by the primary cpu.
* -----------------------------------------------------
*/
bl31_entrypoint:; .type bl31_entrypoint, %function
/* ---------------------------------------------
 * BL2 has populated x0, x3 and x4 with the opcode
* indicating BL31 should be run, memory layout
* of the trusted SRAM available to BL31 and
* information about running the non-trusted
* software already loaded by BL2. Check the
* opcode out of paranoia.
* ---------------------------------------------
*/
mov x19, #RUN_IMAGE
cmp x0, x19
b.ne _panic
mov x20, x3
mov x21, x4
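/* ---------------------------------------------
 * Note: x20 now holds the trusted SRAM layout
 * (from x3) and x21 the non-trusted software
 * image info (from x4); both are passed on to
 * bl31_early_platform_setup below.
 * ---------------------------------------------
 */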
/* ---------------------------------------------
* This is BL31 which is expected to be executed
* only by the primary cpu (at least for now).
* So, make sure no secondary has lost its way.
* ---------------------------------------------
*/
bl read_mpidr
mov x19, x0
bl platform_is_primary_cpu
cbz x0, _panic
/* --------------------------------------------
* Give ourselves a small coherent stack to
* ease the pain of initializing the MMU
* --------------------------------------------
*/
mov x0, x19
bl platform_set_coherent_stack
/* ---------------------------------------------
* Perform platform specific early arch. setup
* ---------------------------------------------
*/
mov x0, x20
mov x1, x21
mov x2, x19
bl bl31_early_platform_setup
bl bl31_plat_arch_setup
/* ---------------------------------------------
* Give ourselves a stack allocated in Normal
* -IS-WBWA memory
* ---------------------------------------------
*/
mov x0, x19
bl platform_set_stack
/* ---------------------------------------------
* Use SP_EL0 to initialize BL31. It allows us
* to jump to the next image without having to
* come back here to ensure all of the stack's
* been popped out. run_image() is not nice
* enough to reset the stack pointer before
* handing control to the next stage.
* ---------------------------------------------
*/
mov x0, sp
msr sp_el0, x0
msr spsel, #0
isb
/* ---------------------------------------------
* Jump to main function.
* ---------------------------------------------
*/
bl bl31_main
_panic:
b _panic
/*
* Copyright (c) 2013, ARM Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
#include <bl31.h>
#include <psci.h>
#include <assert.h>
#include <runtime_svc.h>
/*******************************************************************************
 * This function checks whether this is a valid SMC e.g. the function id is
 * correct, and it zeroes the top words of the arguments when an aarch64
 * caller uses the aarch32 calling convention, etc.
******************************************************************************/
int validate_smc(gp_regs *regs)
{
unsigned int rw = GET_RW(regs->spsr);
unsigned int cc = GET_SMC_CC(regs->x0);
/* Check if there is a difference in the caller RW and SMC CC */
if (rw == cc) {
/* Check whether the caller has chosen the right func. id */
if (cc == SMC_64) {
regs->x0 = SMC_UNK;
return SMC_UNK;
}
/*
* Paranoid check to zero the top word of passed args
* irrespective of caller's register width.
*
 * TODO: Check if this is needed if the caller is aarch32
*/
regs->x0 &= (unsigned int) 0xFFFFFFFF;
regs->x1 &= (unsigned int) 0xFFFFFFFF;
regs->x2 &= (unsigned int) 0xFFFFFFFF;
regs->x3 &= (unsigned int) 0xFFFFFFFF;
regs->x4 &= (unsigned int) 0xFFFFFFFF;
regs->x5 &= (unsigned int) 0xFFFFFFFF;
regs->x6 &= (unsigned int) 0xFFFFFFFF;
}
return 0;
}
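/*
 * Note: for the PSCI calls dispatched below, the arguments arrive in the GP
 * registers per the SMC/PSCI calling conventions, e.g. CPU_ON takes the
 * target cpu (MPIDR) in x1, the non-secure entry point in x2 and a context
 * id in x3; CPU_SUSPEND takes the power state, entry point and context id
 * in x1-x3; AFFINITY_INFO takes the target affinity and lowest affinity
 * level in x1 and x2.
 */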
/* TODO: Break down the SMC handler into fast and standard SMC handlers. */
void smc_handler(unsigned type, unsigned long esr, gp_regs *regs)
{
/* Check if the SMC has been correctly called */
if (validate_smc(regs) != 0)
return;
switch (regs->x0) {
case PSCI_VERSION:
regs->x0 = psci_version();
break;
case PSCI_CPU_OFF:
regs->x0 = __psci_cpu_off();
break;
case PSCI_CPU_SUSPEND_AARCH64:
case PSCI_CPU_SUSPEND_AARCH32:
regs->x0 = __psci_cpu_suspend(regs->x1, regs->x2, regs->x3);
break;
case PSCI_CPU_ON_AARCH64:
case PSCI_CPU_ON_AARCH32:
regs->x0 = psci_cpu_on(regs->x1, regs->x2, regs->x3);
break;
case PSCI_AFFINITY_INFO_AARCH32:
case PSCI_AFFINITY_INFO_AARCH64:
regs->x0 = psci_affinity_info(regs->x1, regs->x2);
break;
default:
regs->x0 = SMC_UNK;
}
return;
}
void irq_handler(unsigned type, unsigned long esr, gp_regs *regs)
{
plat_report_exception(type);
assert(0);
}
void fiq_handler(unsigned type, unsigned long esr, gp_regs *regs)
{
plat_report_exception(type);
assert(0);
}
void serror_handler(unsigned type, unsigned long esr, gp_regs *regs)
{
plat_report_exception(type);
assert(0);
}
void sync_exception_handler(unsigned type, gp_regs *regs)
{
unsigned long esr = read_esr();
unsigned int ec = EC_BITS(esr);
switch (ec) {
case EC_AARCH32_SMC:
case EC_AARCH64_SMC:
smc_handler(type, esr, regs);
break;
default:
plat_report_exception(type);
assert(0);
}
return;
}
void async_exception_handler(unsigned type, gp_regs *regs)
{
unsigned long esr = read_esr();
switch (type) {
case IRQ_SP_EL0:
case IRQ_SP_ELX:
case IRQ_AARCH64:
case IRQ_AARCH32:
irq_handler(type, esr, regs);
break;
case FIQ_SP_EL0:
case FIQ_SP_ELX:
case FIQ_AARCH64:
case FIQ_AARCH32:
fiq_handler(type, esr, regs);
break;
case SERROR_SP_EL0:
case SERROR_SP_ELX:
case SERROR_AARCH64:
case SERROR_AARCH32:
serror_handler(type, esr, regs);
break;
default:
plat_report_exception(type);
assert(0);
}
return;
}