Commit 937108a0 authored by danh-arm, committed by GitHub

Merge pull request #678 from soby-mathew/sm/PSCI_AArch32

Introduce AArch32 support for PSCI library
parents 974603b5 9d29c227
......@@ -45,6 +45,8 @@ include ${MAKE_HELPERS_DIRECTORY}build_env.mk
# Default values for build configurations
################################################################################
# The Target build architecture. Supported values are: aarch64, aarch32.
ARCH := aarch64
# Build verbosity
V := 0
# Debug build
......@@ -54,6 +56,8 @@ DEFAULT_PLAT := fvp
PLAT := ${DEFAULT_PLAT}
# SPD choice
SPD := none
# The AArch32 Secure Payload to be built as BL32 image
AARCH32_SP := none
# Base commit to perform code check on
BASE_COMMIT := origin/master
# NS timer register save and restore
......@@ -198,14 +202,20 @@ OD := ${CROSS_COMPILE}objdump
NM := ${CROSS_COMPILE}nm
PP := ${CROSS_COMPILE}gcc -E
ASFLAGS_aarch64 = -mgeneral-regs-only
TF_CFLAGS_aarch64 = -mgeneral-regs-only -mstrict-align
ASFLAGS_aarch32 = -march=armv8-a
TF_CFLAGS_aarch32 = -march=armv8-a
ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
-Werror -Wmissing-include-dirs \
-mgeneral-regs-only -D__ASSEMBLY__ \
-D__ASSEMBLY__ $(ASFLAGS_$(ARCH)) \
${DEFINES} ${INCLUDES}
TF_CFLAGS += -nostdinc -ffreestanding -Wall \
-Werror -Wmissing-include-dirs \
-mgeneral-regs-only -mstrict-align \
-std=c99 -c -Os \
$(TF_CFLAGS_$(ARCH)) \
${DEFINES} ${INCLUDES}
TF_CFLAGS += -ffunction-sections -fdata-sections
......@@ -220,26 +230,26 @@ include lib/stdlib/stdlib.mk
BL_COMMON_SOURCES += common/bl_common.c \
common/tf_printf.c \
common/aarch64/debug.S \
lib/aarch64/cache_helpers.S \
lib/aarch64/misc_helpers.S \
plat/common/aarch64/platform_helpers.S \
common/${ARCH}/debug.S \
lib/${ARCH}/cache_helpers.S \
lib/${ARCH}/misc_helpers.S \
plat/common/${ARCH}/platform_helpers.S \
${STDLIB_SRCS}
INCLUDES += -Iinclude/bl1 \
-Iinclude/bl31 \
-Iinclude/common \
-Iinclude/common/aarch64 \
-Iinclude/common/${ARCH} \
-Iinclude/drivers \
-Iinclude/drivers/arm \
-Iinclude/drivers/auth \
-Iinclude/drivers/io \
-Iinclude/drivers/ti/uart \
-Iinclude/lib \
-Iinclude/lib/aarch64 \
-Iinclude/lib/cpus/aarch64 \
-Iinclude/lib/${ARCH} \
-Iinclude/lib/cpus/${ARCH} \
-Iinclude/lib/el3_runtime \
-Iinclude/lib/el3_runtime/aarch64 \
-Iinclude/lib/el3_runtime/${ARCH} \
-Iinclude/lib/psci \
-Iinclude/plat/common \
-Iinclude/services \
......@@ -267,6 +277,9 @@ INCLUDE_TBBR_MK := 1
################################################################################
ifneq (${SPD},none)
ifeq (${ARCH},aarch32)
$(error "Error: SPD is incompatible with AArch32.")
endif
ifdef EL3_PAYLOAD_BASE
$(warning "SPD and EL3_PAYLOAD_BASE are incompatible build options.")
$(warning "The SPD and its BL32 companion will be present but ignored.")
......@@ -299,6 +312,8 @@ endif
include ${PLAT_MAKEFILE_FULL}
# Platform compatibility is not supported in AArch32
ifneq (${ARCH},aarch32)
# If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
ifndef ENABLE_PLAT_COMPAT
ENABLE_PLAT_COMPAT := 1
......@@ -308,6 +323,7 @@ endif
ifneq (${ENABLE_PLAT_COMPAT}, 0)
include plat/compat/plat_compat.mk
endif
endif
# Include the CPU specific operations makefile, which provides default
# values for all CPU errata workarounds and CPU specific optimisations.
......@@ -468,11 +484,18 @@ else
$(eval $(call add_define,PRELOADED_BL33_BASE))
endif
endif
# Define the AARCH32/AARCH64 flag based on the ARCH flag
ifeq (${ARCH},aarch32)
$(eval $(call add_define,AARCH32))
else
$(eval $(call add_define,AARCH64))
endif
################################################################################
# Include BL specific makefiles
################################################################################
# BL31 is not needed and BL1, BL2 & BL2U are not currently supported in AArch32
ifneq (${ARCH},aarch32)
ifdef BL1_SOURCES
NEED_BL1 := yes
include bl1/bl1.mk
......@@ -496,7 +519,27 @@ NEED_BL31 := yes
include bl31/bl31.mk
endif
endif
endif
ifeq (${ARCH},aarch32)
NEED_BL32 := yes
################################################################################
# Build `AARCH32_SP` as BL32 image for AArch32
################################################################################
ifneq (${AARCH32_SP},none)
# We expect to locate an sp.mk under the specified AARCH32_SP directory
AARCH32_SP_MAKE := $(wildcard bl32/${AARCH32_SP}/${AARCH32_SP}.mk)
ifeq (${AARCH32_SP_MAKE},)
$(error Error: No bl32/${AARCH32_SP}/${AARCH32_SP}.mk located)
endif
$(info Including ${AARCH32_SP_MAKE})
include ${AARCH32_SP_MAKE}
endif
endif
################################################################################
# Build targets
......@@ -665,7 +708,8 @@ help:
@echo " bl2 Build the BL2 binary"
@echo " bl2u Build the BL2U binary"
@echo " bl31 Build the BL31 binary"
@echo " bl32 Build the BL32 binary"
@echo " bl32 Build the BL32 binary. If ARCH=aarch32, then "
@echo " this builds secure payload specified by AARCH32_SP"
@echo " certificates Build the certificates (requires 'GENERATE_COT=1')"
@echo " fip Build the Firmware Image Package (FIP)"
@echo " fwu_fip Build the FWU Firmware Image Package (FIP)"
......
......@@ -38,6 +38,7 @@
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <utils.h>
#include "bl1_private.h"
#include <uuid.h>
......
......@@ -32,6 +32,6 @@ BL2_SOURCES += bl2/bl2_main.c \
bl2/aarch64/bl2_entrypoint.S \
bl2/aarch64/bl2_arch_setup.c \
common/aarch64/early_exceptions.S \
lib/locks/exclusive/spinlock.S
lib/locks/exclusive/aarch64/spinlock.S
BL2_LINKERFILE := bl2/bl2.ld.S
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>
.globl sp_min_vector_table
.globl sp_min_entrypoint
.globl sp_min_warm_entrypoint
func sp_min_vector_table
b sp_min_entrypoint
b plat_panic_handler /* Undef */
b handle_smc /* Syscall */
b plat_panic_handler /* Prefetch abort */
b plat_panic_handler /* Data abort */
b plat_panic_handler /* Reserved */
b plat_panic_handler /* IRQ */
b plat_panic_handler /* FIQ */
endfunc sp_min_vector_table
func handle_smc
smcc_save_gp_mode_regs
/* r0 points to smc_context */
mov r2, r0 /* handle */
ldcopr r0, SCR
/* Save SCR in stack */
push {r0}
and r3, r0, #SCR_NS_BIT /* flags */
/* Switch to Secure Mode*/
bic r0, #SCR_NS_BIT
stcopr r0, SCR
isb
ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
/* Check whether an SMC64 is issued */
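/*
 * Bit[30] of the SMC function ID is the calling convention flag
 * (0 = SMC32, 1 = SMC64 per the SMC Calling Convention), so a
 * non-zero result below means the call cannot be handled by
 * AArch32 SP_MIN and SMC_UNK is returned.
 */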
tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
beq 1f /* SMC32 is detected */
mov r0, #SMC_UNK
str r0, [r2, #SMC_CTX_GPREG_R0]
mov r0, r2
b 2f /* Skip handling the SMC */
1:
mov r1, #0 /* cookie */
bl handle_runtime_svc
2:
/* r0 points to smc context */
/* Restore SCR from stack */
pop {r1}
stcopr r1, SCR
isb
b sp_min_exit
endfunc handle_smc
/*
* The Cold boot/Reset entrypoint for SP_MIN
*/
func sp_min_entrypoint
/*
* The caches and TLBs are disabled at reset. If any implementation
* allows the caches/TLB to be hit while they are disabled, ensure
* that they are invalidated here
*/
/* Make sure we are in Secure Mode*/
ldcopr r0, SCR
bic r0, #SCR_NS_BIT
stcopr r0, SCR
isb
/* Switch to monitor mode */
cps #MODE32_mon
isb
/*
* Set sane values for NS SCTLR as well.
* Switch to non secure mode for this.
*/
ldr r0, =(SCTLR_RES1)
ldcopr r1, SCR
orr r2, r1, #SCR_NS_BIT
stcopr r2, SCR
isb
ldcopr r2, SCTLR
orr r0, r0, r2
stcopr r0, SCTLR
isb
stcopr r1, SCR
isb
/*
* Set the CPU endianness before doing anything that might involve
* memory reads or writes.
*/
ldcopr r0, SCTLR
bic r0, r0, #SCTLR_EE_BIT
stcopr r0, SCTLR
isb
/* Run the CPU Specific Reset handler */
bl reset_handler
/*
* Enable the instruction cache and data access
* alignment checks
*/
ldcopr r0, SCTLR
ldr r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
orr r0, r0, r1
stcopr r0, SCTLR
isb
/* Set the vector tables */
ldr r0, =sp_min_vector_table
stcopr r0, VBAR
stcopr r0, MVBAR
isb
/*
* Enable the SIF bit to disable instruction fetches
* from Non-secure memory.
*/
ldcopr r0, SCR
orr r0, r0, #SCR_SIF_BIT
stcopr r0, SCR
/*
* Enable the SError interrupt now that the exception vectors have been
* set up.
*/
cpsie a
isb
/* Enable access to Advanced SIMD registers */
ldcopr r0, NSACR
bic r0, r0, #NSASEDIS_BIT
orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
stcopr r0, NSACR
isb
/*
* Enable access to Advanced SIMD, Floating point and to the Trace
* functionality as well.
*/
ldcopr r0, CPACR
bic r0, r0, #ASEDIS_BIT
bic r0, r0, #TRCDIS_BIT
orr r0, r0, #CPACR_ENABLE_FP_ACCESS
stcopr r0, CPACR
isb
vmrs r0, FPEXC
orr r0, r0, #FPEXC_EN_BIT
vmsr FPEXC, r0
/* Detect whether Warm or Cold boot */
bl plat_get_my_entrypoint
cmp r0, #0
/* If warm boot detected, jump to warm boot entry */
bxne r0
/* Setup C runtime stack */
bl plat_set_my_stack
/* Perform platform specific memory initialization */
bl platform_mem_init
/* Initialize the C Runtime Environment */
/*
* Invalidate the RW memory used by SP_MIN image. This includes
* the data and NOBITS sections. This is done to safeguard against
* possible corruption of this memory by dirty cache lines in a system
* cache as a result of use by an earlier boot loader stage.
*/
ldr r0, =__RW_START__
ldr r1, =__RW_END__
sub r1, r1, r0
bl inv_dcache_range
ldr r0, =__BSS_START__
ldr r1, =__BSS_SIZE__
bl zeromem
#if USE_COHERENT_MEM
ldr r0, =__COHERENT_RAM_START__
ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
bl zeromem
#endif
/* Perform platform specific early arch. setup */
bl sp_min_early_platform_setup
bl sp_min_plat_arch_setup
/* Jump to the main function */
bl sp_min_main
/* -------------------------------------------------------------
* Clean the .data & .bss sections to main memory. This ensures
* that any global data which was initialised by the primary CPU
* is visible to secondary CPUs before they enable their data
* caches and participate in coherency.
* -------------------------------------------------------------
*/
ldr r0, =__DATA_START__
ldr r1, =__DATA_END__
sub r1, r1, r0
bl clean_dcache_range
ldr r0, =__BSS_START__
ldr r1, =__BSS_END__
sub r1, r1, r0
bl clean_dcache_range
/* Program the registers in cpu_context and exit monitor mode */
mov r0, #NON_SECURE
bl cm_get_context
/* Restore the SCR */
ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
stcopr r2, SCR
isb
/* Restore the SCTLR */
ldr r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
stcopr r2, SCTLR
bl smc_get_next_ctx
/* The other cpu_context registers have been copied to smc context */
b sp_min_exit
endfunc sp_min_entrypoint
/*
* The Warm boot entrypoint for SP_MIN.
*/
func sp_min_warm_entrypoint
/* Setup C runtime stack */
bl plat_set_my_stack
/* --------------------------------------------
* Enable the MMU with the DCache disabled. It
* is safe to use stacks allocated in normal
* memory as a result. All memory accesses are
* marked nGnRnE when the MMU is disabled. So
* all the stack writes will make it to memory.
* All memory accesses are marked Non-cacheable
* when the MMU is enabled but D$ is disabled.
* So used stack memory is guaranteed to be
* visible immediately after the MMU is enabled
* Enabling the DCache at the same time as the
* MMU can lead to speculatively fetched and
* possibly stale stack memory being read from
* other caches. This can lead to coherency
* issues.
* --------------------------------------------
*/
mov r0, #DISABLE_DCACHE
bl bl32_plat_enable_mmu
bl sp_min_warm_boot
/* Program the registers in cpu_context and exit monitor mode */
mov r0, #NON_SECURE
bl cm_get_context
/* Restore the SCR */
ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
stcopr r2, SCR
isb
/* Restore the SCTLR */
ldr r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
stcopr r2, SCTLR
bl smc_get_next_ctx
/* The other cpu_context registers have been copied to smc context */
b sp_min_exit
endfunc sp_min_warm_entrypoint
/*
* The function to restore the registers from SMC context and return
* to the mode specified by the SPSR.
*
* Arguments : r0 must point to the SMC context to restore from.
*/
func sp_min_exit
smcc_restore_gp_mode_regs
eret
endfunc sp_min_exit
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform_def.h>
OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)
MEMORY {
RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}
SECTIONS
{
. = BL32_BASE;
ASSERT(. == ALIGN(4096),
"BL32_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*entrypoint.o(.text*)
*(.text*)
. = NEXT(4096);
__TEXT_END__ = .;
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(.rodata*)
/* Ensure 4-byte alignment for descriptors and ensure inclusion */
. = ALIGN(4);
__RT_SVC_DESCS_START__ = .;
KEEP(*(rt_svc_descs))
__RT_SVC_DESCS_END__ = .;
/*
* Ensure 4-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
*/
. = ALIGN(4);
__CPU_OPS_START__ = .;
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
. = NEXT(4096);
__RODATA_END__ = .;
} >RAM
#else
ro . : {
__RO_START__ = .;
*entrypoint.o(.text*)
*(.text*)
*(.rodata*)
/* Ensure 4-byte alignment for descriptors and ensure inclusion */
. = ALIGN(4);
__RT_SVC_DESCS_START__ = .;
KEEP(*(rt_svc_descs))
__RT_SVC_DESCS_END__ = .;
/*
* Ensure 4-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
*/
. = ALIGN(4);
__CPU_OPS_START__ = .;
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory block is unused.
*/
. = NEXT(4096);
__RO_END__ = .;
} >RAM
#endif
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
.data . : {
__DATA_START__ = .;
*(.data*)
__DATA_END__ = .;
} >RAM
stacks (NOLOAD) : {
__STACKS_START__ = .;
*(tzfw_normal_stacks)
__STACKS_END__ = .;
} >RAM
/*
* The .bss section gets initialised to 0 at runtime.
* Its base address must be 16-byte aligned.
*/
.bss (NOLOAD) : ALIGN(16) {
__BSS_START__ = .;
*(.bss*)
*(COMMON)
#if !USE_COHERENT_MEM
/*
* Bakery locks are stored in normal .bss memory
*
* Each lock's data is spread across multiple cache lines, one per CPU,
* but multiple locks can share the same cache line.
* The compiler will allocate enough memory for one CPU's bakery locks,
* the remaining cache lines are allocated by the linker script
*/
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__BAKERY_LOCK_START__ = .;
*(bakery_lock)
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
__BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
"PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif
#if ENABLE_PMF
/*
* Time-stamps are stored in normal .bss memory
*
* The compiler will allocate enough memory for one CPU's time-stamps,
* the remaining memory for other CPU's is allocated by the
* linker script
*/
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__PMF_TIMESTAMP_START__ = .;
KEEP(*(pmf_timestamp_array))
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__PMF_PERCPU_TIMESTAMP_END__ = .;
__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
__PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
__BSS_END__ = .;
} >RAM
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
* the .bss section and eliminates the unnecessary zero init
*/
xlat_table (NOLOAD) : {
*(xlat_table)
} >RAM
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(4096) {
__COHERENT_RAM_START__ = .;
/*
* Bakery locks are stored in coherent memory
*
* Each lock's data is contiguous and fully allocated by the compiler
*/
*(bakery_lock)
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
*/
. = NEXT(4096);
__COHERENT_RAM_END__ = .;
} >RAM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL32_END__ = .;
}
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
ifneq (${ARCH}, aarch32)
$(error SP_MIN is only supported on AArch32 platforms)
endif
include lib/psci/psci_lib.mk
INCLUDES += -Iinclude/bl32/sp_min
BL32_SOURCES += bl32/sp_min/sp_min_main.c \
bl32/sp_min/aarch32/entrypoint.S \
common/runtime_svc.c \
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES}
ifeq (${ENABLE_PMF}, 1)
BL32_SOURCES += lib/pmf/pmf_main.c
endif
BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S
# Include the platform-specific SP_MIN Makefile
# If no platform-specific SP_MIN Makefile exists, it means SP_MIN is not supported
# on this platform.
SP_MIN_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/sp_min/sp_min-${PLAT}.mk)
ifeq (,${SP_MIN_PLAT_MAKEFILE})
$(error SP_MIN is not supported on platform ${PLAT})
else
include ${SP_MIN_PLAT_MAKEFILE}
endif
RESET_TO_SP_MIN := 1
$(eval $(call add_define,RESET_TO_SP_MIN))
$(eval $(call assert_boolean,RESET_TO_SP_MIN))
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <platform_def.h>
#include <platform_sp_min.h>
#include <psci.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <types.h>
#include "sp_min_private.h"
/* Pointers to per-core cpu contexts */
static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];
/* SP_MIN only stores the non secure smc context */
static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];
/******************************************************************************
* Define the smcc helper library API's
*****************************************************************************/
void *smc_get_ctx(int security_state)
{
assert(security_state == NON_SECURE);
return &sp_min_smc_context[plat_my_core_pos()];
}
void smc_set_next_ctx(int security_state)
{
assert(security_state == NON_SECURE);
/* SP_MIN stores only non secure smc context. Nothing to do here */
}
void *smc_get_next_ctx(void)
{
return &sp_min_smc_context[plat_my_core_pos()];
}
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the calling CPU that was set as the context for the specified security
* state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context(uint32_t security_state)
{
assert(security_state == NON_SECURE);
return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the calling CPU
******************************************************************************/
void cm_set_context(void *context, uint32_t security_state)
{
assert(security_state == NON_SECURE);
sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
}
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by `cpu_idx` that was set as the context for the
* specified security state. NULL is returned if no such structure has been
* specified.
******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state)
{
assert(security_state == NON_SECURE);
return sp_min_cpu_ctx_ptr[cpu_idx];
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by CPU index.
******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
unsigned int security_state)
{
assert(security_state == NON_SECURE);
sp_min_cpu_ctx_ptr[cpu_idx] = context;
}
static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
smc_ctx_t *next_smc_ctx)
{
next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
}
/*******************************************************************************
* This function invokes the PSCI library interface to initialize the
* non secure cpu context and copies the relevant cpu context register values
* to smc context. These registers will get programmed during `smc_exit`.
******************************************************************************/
static void sp_min_prepare_next_image_entry(void)
{
entry_point_info_t *next_image_info;
/* Program system registers to proceed to non-secure */
next_image_info = sp_min_plat_get_bl33_ep_info();
assert(next_image_info);
assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));
INFO("SP_MIN: Preparing exit to normal world\n");
psci_prepare_next_non_secure_ctx(next_image_info);
smc_set_next_ctx(NON_SECURE);
/* Copy r0, lr and spsr from cpu context to SMC context */
copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
smc_get_next_ctx());
}
/******************************************************************************
* The SP_MIN main function. Do the platform and PSCI Library setup. Also
* initialize the runtime service framework.
*****************************************************************************/
void sp_min_main(void)
{
/* Perform platform setup in SP_MIN */
sp_min_platform_setup();
/*
* Initialize the PSCI library and perform the remaining generic
* architectural setup from PSCI.
*/
psci_setup((uintptr_t)sp_min_warm_entrypoint);
/*
* Initialize the runtime services e.g. psci
* This is where the monitor mode will be initialized
*/
INFO("SP_MIN: Initializing runtime services\n");
runtime_svc_init();
/*
* We are ready to enter the next EL. Prepare entry into the image
* corresponding to the desired security state after the next ERET.
*/
sp_min_prepare_next_image_entry();
}
/******************************************************************************
* This function is invoked during warm boot. Invoke the PSCI library
* warm boot entry point which takes care of Architectural and platform setup/
* restore. Copy the relevant cpu_context register values to smc context which
* will get programmed during `smc_exit`.
*****************************************************************************/
void sp_min_warm_boot(void)
{
smc_ctx_t *next_smc_ctx;
psci_warmboot_entrypoint();
smc_set_next_ctx(NON_SECURE);
next_smc_ctx = smc_get_next_ctx();
memset(next_smc_ctx, 0, sizeof(smc_ctx_t));
copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
next_smc_ctx);
}
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SP_MIN_H__
#define __SP_MIN_H__
void sp_min_warm_entrypoint(void);
void sp_min_main(void);
void sp_min_warm_boot(void);
#endif /* __SP_MIN_H__ */
......@@ -37,7 +37,7 @@ BL32_SOURCES += bl32/tsp/tsp_main.c \
bl32/tsp/tsp_interrupt.c \
bl32/tsp/tsp_timer.c \
common/aarch64/early_exceptions.S \
lib/locks/exclusive/spinlock.S
lib/locks/exclusive/aarch64/spinlock.S
BL32_LINKERFILE := bl32/tsp/tsp.ld.S
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
.globl do_panic
/***********************************************************
* The common implementation of do_panic for all BL stages
***********************************************************/
func do_panic
b plat_panic_handler
endfunc do_panic
......@@ -51,6 +51,34 @@ static rt_svc_desc_t *rt_svc_descs;
#define RT_SVC_DECS_NUM ((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
/ sizeof(rt_svc_desc_t))
/*******************************************************************************
* Function to invoke the registered `handle` corresponding to the smc_fid.
******************************************************************************/
uintptr_t handle_runtime_svc(uint32_t smc_fid,
void *cookie,
void *handle,
unsigned int flags)
{
u_register_t x1, x2, x3, x4;
int index, idx;
const rt_svc_desc_t *rt_svc_descs;
assert(handle);
idx = get_unique_oen_from_smc_fid(smc_fid);
assert(idx >= 0 && idx < MAX_RT_SVCS);
index = rt_svc_descs_indices[idx];
if (index < 0 || index >= RT_SVC_DECS_NUM)
SMC_RET1(handle, SMC_UNK);
rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
get_smc_params_from_ctx(handle, x1, x2, x3, x4);
return rt_svc_descs[index].handle(smc_fid, x1, x2, x3, x4, cookie,
handle, flags);
}
/*******************************************************************************
* Simple routine to sanity check a runtime service descriptor before using it
******************************************************************************/
......
......@@ -27,7 +27,11 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <limits.h>
#include <stdarg.h>
#include <stdint.h>
......@@ -49,6 +53,85 @@ static void string_print(const char *str)
putchar(*str++);
}
#ifdef AARCH32
#define unsigned_num_print(unum, radix) \
do { \
if ((radix) == 16) \
unsigned_hex_print(unum); \
else if ((radix) == 10) \
unsigned_dec_print(unum); \
else \
string_print("tf_printf : Unsupported radix");\
} while (0);
/*
* Utility function to print an unsigned number in decimal format for AArch32.
* The function doesn't support printing decimal integers higher than 32 bits
* to avoid having to implement 64-bit integer compiler library functions.
*/
static void unsigned_dec_print(unsigned long long int unum)
{
unsigned int local_num;
/* Just need enough space to store 32 bit decimal integer */
unsigned char num_buf[10];
int i = 0, rem;
if (unum > UINT_MAX) {
string_print("tf_printf : decimal numbers higher than 32 bits"
" not supported\n");
return;
}
local_num = (unsigned int)unum;
do {
rem = local_num % 10;
num_buf[i++] = '0' + rem;
} while (local_num /= 10);
while (--i >= 0)
putchar(num_buf[i]);
}
/*
* Utility function to print an unsigned number in hexadecimal format for
* AArch32. The function doesn't use 64-bit integer arithmetic to avoid
* having to implement 64-bit compiler library functions. It splits the
* 64 bit number into two 32 bit numbers and converts them into equivalent
* ASCII characters.
*/
static void unsigned_hex_print(unsigned long long int unum)
{
/* Just need enough space to store 16 characters */
unsigned char num_buf[16];
int i = 0, rem;
uint32_t num_local = 0, num_msb = 0;
/* Get the LSB of 64 bit unum */
num_local = (uint32_t)unum;
/* Get the MSB of 64 bit unum. This works only on Little Endian */
assert((read_sctlr() & SCTLR_EE_BIT) == 0);
num_msb = *(((uint32_t *) &unum) + 1);
do {
do {
rem = (num_local & 0xf);
if (rem < 0xa)
num_buf[i++] = '0' + rem;
else
num_buf[i++] = 'a' + (rem - 0xa);
} while (num_local >>= 4);
num_local = num_msb;
num_msb = 0;
} while (num_local);
while (--i >= 0)
putchar(num_buf[i]);
}
#else
static void unsigned_num_print(unsigned long long int unum, unsigned int radix)
{
/* Just need enough space to store 64 bit decimal integer */
......@@ -66,6 +149,7 @@ static void unsigned_num_print(unsigned long long int unum, unsigned int radix)
while (--i >= 0)
putchar(num_buf[i]);
}
#endif /* AARCH32 */
/*******************************************************************
* Reduced format print for Trusted firmware.
......
......@@ -208,11 +208,21 @@ performed.
platform name must be a subdirectory of any depth under `plat/`, and must
contain a platform makefile named `platform.mk`.
* `ARCH` : Choose the target build architecture for ARM Trusted Firmware.
It can take either `aarch64` or `aarch32` as values. By default, it is
defined to `aarch64`.
* `SPD`: Choose a Secure Payload Dispatcher component to be built into the
Trusted Firmware. The value should be the path to the directory containing
the SPD source, relative to `services/spd/`; the directory is expected to
Trusted Firmware. This build option is only valid if `ARCH=aarch64`. The
value should be the path to the directory containing the SPD source,
relative to `services/spd/`; the directory is expected to
contain a makefile called `<spd-value>.mk`.
* `AARCH32_SP` : Choose the AArch32 Secure Payload component to be built as
the BL32 image when `ARCH=aarch32`. The value should be the path to the
directory containing the SP source, relative to `bl32/`; the directory
is expected to contain a makefile called `<aarch32_sp-value>.mk` (see the
example below).
* `V`: Verbose build. If assigned anything other than 0, the build commands
are printed. Default is 0.
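For example, assuming the chosen platform provides an SP_MIN makefile, an
AArch32 build of the SP_MIN payload might be invoked as follows (the
cross-compiler prefix shown is illustrative):

    make CROSS_COMPILE=arm-eabi- ARCH=aarch32 AARCH32_SP=sp_min PLAT=fvp all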
......
......@@ -75,8 +75,12 @@ void gicv3_driver_init(const gicv3_driver_data_t *plat_driver_data)
plat_driver_data->g1s_interrupt_num == 0);
/* Check for system register support */
#ifdef AARCH32
assert(read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT));
#else
assert(read_id_aa64pfr0_el1() &
(ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT));
#endif /* AARCH32 */
/* The GIC version should be 3.0 */
gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
......
......@@ -79,9 +79,13 @@
* Macro to convert a GICR_TYPER affinity value into a MPIDR value. Bits[31:24]
* are zeroes.
*/
#ifdef AARCH32
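/*
 * The AArch32 MPIDR format has no Aff3 field, so only the Aff0/Aff1/Aff2
 * fields (GICR_TYPER bits [55:32]) are extracted here.
 */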
#define mpidr_from_gicr_typer(typer_val) (((typer_val) >> 32) & 0xffffff)
#else
#define mpidr_from_gicr_typer(typer_val) \
((((typer_val >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
((typer_val >> 32) & 0xffffff))
(((((typer_val) >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
(((typer_val) >> 32) & 0xffffff))
#endif
/*******************************************************************************
* Private GICv3 function prototypes for accessing entire registers.
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <pl011.h>
/*
* Pull in generic functions to provide backwards compatibility for
* platform makefiles
*/
#include "../../../console/aarch32/console.S"
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
/* -----------------------------------------------
* int console_core_init(uintptr_t base_addr,
* unsigned int uart_clk, unsigned int baud_rate)
* Function to initialize the console without a
* C Runtime to print debug information. This
* function will be accessed by console_init and
* crash reporting.
* In: r0 - console base address
* r1 - Uart clock in Hz
* r2 - Baud rate
* Out: return 1 on success else 0 on error
* Clobber list : r1, r2, r3
* -----------------------------------------------
*/
func console_core_init
/* Check the input base address */
cmp r0, #0
beq core_init_fail
#if !PL011_GENERIC_UART
/* Check baud rate and uart clock for sanity */
cmp r1, #0
beq core_init_fail
cmp r2, #0
beq core_init_fail
/* Disable the UART before initialization */
ldr r3, [r0, #UARTCR]
bic r3, r3, #PL011_UARTCR_UARTEN
str r3, [r0, #UARTCR]
/* Program the baudrate */
/* Divisor = (Uart clock * 4) / baudrate */
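/*
 * Illustrative values: with a 24 MHz UART clock and a 115200 baud rate,
 * Divisor = (24000000 * 4) / 115200 = 833, so IBRD = 833 >> 6 = 13 and
 * FBRD = 833 & 0x3F = 1.
 */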
lsl r1, r1, #2
udiv r2, r1, r2
/* IBRD = Divisor >> 6 */
lsr r1, r2, #6
/* Write the IBRD */
str r1, [r0, #UARTIBRD]
/* FBRD = Divisor & 0x3F */
and r1, r2, #0x3f
/* Write the FBRD */
str r1, [r0, #UARTFBRD]
mov r1, #PL011_LINE_CONTROL
str r1, [r0, #UARTLCR_H]
/* Clear any pending errors */
mov r1, #0
str r1, [r0, #UARTECR]
/* Enable tx, rx, and uart overall */
ldr r1, =(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
str r1, [r0, #UARTCR]
#endif
mov r0, #1
bx lr
core_init_fail:
mov r0, #0
bx lr
endfunc console_core_init
/* --------------------------------------------------------
* int console_core_putc(int c, uintptr_t base_addr)
* Function to output a character over the console. It
* returns the character printed on success or -1 on error.
* In : r0 - character to be printed
* r1 - console base address
* Out : return -1 on error else return character.
* Clobber list : r2
* --------------------------------------------------------
*/
func console_core_putc
/* Check the input parameter */
cmp r1, #0
beq putc_error
/* Prepend '\r' to '\n' */
cmp r0, #0xA
bne 2f
1:
/* Check if the transmit FIFO is full */
ldr r2, [r1, #UARTFR]
tst r2, #PL011_UARTFR_TXFF_BIT
beq 1b
mov r2, #0xD
str r2, [r1, #UARTDR]
2:
/* Check if the transmit FIFO is full */
ldr r2, [r1, #UARTFR]
tst r2, #PL011_UARTFR_TXFF_BIT
beq 2b
str r0, [r1, #UARTDR]
bx lr
putc_error:
mov r0, #-1
bx lr
endfunc console_core_putc
/* ---------------------------------------------
* int console_core_getc(uintptr_t base_addr)
* Function to get a character from the console.
* It returns the character grabbed on success
* or -1 on error.
* In : r0 - console base address
* Clobber list : r0, r1
* ---------------------------------------------
*/
func console_core_getc
cmp r0, #0
beq getc_error
1:
/* Check if the receive FIFO is empty */
ldr r1, [r0, #UARTFR]
tst r1, #PL011_UARTFR_RXFE_BIT
beq 1b
ldr r1, [r0, #UARTDR]
mov r0, r1
bx lr
getc_error:
mov r0, #-1
bx lr
endfunc console_core_getc
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <pl011.h>
/*
* Pull in generic functions to provide backwards compatibility for
* platform makefiles
*/
#include "../../../console/aarch64/console.S"
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
/* -----------------------------------------------
* int console_core_init(uintptr_t base_addr,
* unsigned int uart_clk, unsigned int baud_rate)
* Function to initialize the console without a
* C Runtime to print debug information. This
* function will be accessed by console_init and
* crash reporting.
* In: x0 - console base address
* w1 - Uart clock in Hz
* w2 - Baud rate
* Out: return 1 on success else 0 on error
* Clobber list : x1, x2, x3, x4
* -----------------------------------------------
*/
func console_core_init
/* Check the input base address */
cbz x0, core_init_fail
#if !PL011_GENERIC_UART
/* Check baud rate and uart clock for sanity */
cbz w1, core_init_fail
cbz w2, core_init_fail
/* Disable uart before programming */
ldr w3, [x0, #UARTCR]
mov w4, #PL011_UARTCR_UARTEN
bic w3, w3, w4
str w3, [x0, #UARTCR]
/* Program the baudrate */
/* Divisor = (Uart clock * 4) / baudrate */
lsl w1, w1, #2
udiv w2, w1, w2
/* IBRD = Divisor >> 6 */
lsr w1, w2, #6
/* Write the IBRD */
str w1, [x0, #UARTIBRD]
/* FBRD = Divisor & 0x3F */
and w1, w2, #0x3f
/* Write the FBRD */
str w1, [x0, #UARTFBRD]
mov w1, #PL011_LINE_CONTROL
str w1, [x0, #UARTLCR_H]
/* Clear any pending errors */
str wzr, [x0, #UARTECR]
/* Enable tx, rx, and uart overall */
mov w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
str w1, [x0, #UARTCR]
#endif
mov w0, #1
ret
core_init_fail:
mov w0, wzr
ret
endfunc console_core_init
/* --------------------------------------------------------
* int console_core_putc(int c, uintptr_t base_addr)
* Function to output a character over the console. It
* returns the character printed on success or -1 on error.
* In : w0 - character to be printed
* x1 - console base address
* Out : return -1 on error else return character.
* Clobber list : x2
* --------------------------------------------------------
*/
func console_core_putc
/* Check the input parameter */
cbz x1, putc_error
/* Prepend '\r' to '\n' */
cmp w0, #0xA
b.ne 2f
1:
/* Check if the transmit FIFO is full */
ldr w2, [x1, #UARTFR]
tbnz w2, #PL011_UARTFR_TXFF_BIT, 1b
mov w2, #0xD
str w2, [x1, #UARTDR]
2:
/* Check if the transmit FIFO is full */
ldr w2, [x1, #UARTFR]
tbnz w2, #PL011_UARTFR_TXFF_BIT, 2b
str w0, [x1, #UARTDR]
ret
putc_error:
mov w0, #-1
ret
endfunc console_core_putc
/* ---------------------------------------------
* int console_core_getc(uintptr_t base_addr)
* Function to get a character from the console.
* It returns the character grabbed on success
* or -1 on error.
* In : x0 - console base address
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_getc
cbz x0, getc_error
1:
/* Check if the receive FIFO is empty */
ldr w1, [x0, #UARTFR]
tbnz w1, #PL011_UARTFR_RXFE_BIT, 1b
ldr w1, [x0, #UARTDR]
mov w0, w1
ret
getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -27,127 +27,7 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <pl011.h>
/*
* Pull in generic functions to provide backwards compatibility for
* platform makefiles
*/
#include "../../console/console.S"
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
/* -----------------------------------------------
* int console_core_init(uintptr_t base_addr,
* unsigned int uart_clk, unsigned int baud_rate)
* Function to initialize the console without a
* C Runtime to print debug information. This
* function will be accessed by console_init and
* crash reporting.
* In: x0 - console base address
* w1 - Uart clock in Hz
* w2 - Baud rate
* Out: return 1 on success else 0 on error
* Clobber list : x1, x2, x3, x4
* -----------------------------------------------
*/
func console_core_init
/* Check the input base address */
cbz x0, core_init_fail
#if !PL011_GENERIC_UART
/* Check baud rate and uart clock for sanity */
cbz w1, core_init_fail
cbz w2, core_init_fail
/* Disable uart before programming */
ldr w3, [x0, #UARTCR]
mov w4, #PL011_UARTCR_UARTEN
bic w3, w3, w4
str w3, [x0, #UARTCR]
/* Program the baudrate */
/* Divisor = (Uart clock * 4) / baudrate */
lsl w1, w1, #2
udiv w2, w1, w2
/* IBRD = Divisor >> 6 */
lsr w1, w2, #6
/* Write the IBRD */
str w1, [x0, #UARTIBRD]
/* FBRD = Divisor & 0x3F */
and w1, w2, #0x3f
/* Write the FBRD */
str w1, [x0, #UARTFBRD]
mov w1, #PL011_LINE_CONTROL
str w1, [x0, #UARTLCR_H]
/* Clear any pending errors */
str wzr, [x0, #UARTECR]
/* Enable tx, rx, and uart overall */
mov w1, #(PL011_UARTCR_RXE | PL011_UARTCR_TXE | PL011_UARTCR_UARTEN)
str w1, [x0, #UARTCR]
#if !ERROR_DEPRECATED
#include "./aarch64/pl011_console.S"
#endif
mov w0, #1
ret
core_init_fail:
mov w0, wzr
ret
endfunc console_core_init
/* --------------------------------------------------------
* int console_core_putc(int c, uintptr_t base_addr)
* Function to output a character over the console. It
* returns the character printed on success or -1 on error.
* In : w0 - character to be printed
* x1 - console base address
* Out : return -1 on error else return character.
* Clobber list : x2
* --------------------------------------------------------
*/
func console_core_putc
/* Check the input parameter */
cbz x1, putc_error
/* Prepend '\r' to '\n' */
cmp w0, #0xA
b.ne 2f
1:
/* Check if the transmit FIFO is full */
ldr w2, [x1, #UARTFR]
tbnz w2, #PL011_UARTFR_TXFF_BIT, 1b
mov w2, #0xD
str w2, [x1, #UARTDR]
2:
/* Check if the transmit FIFO is full */
ldr w2, [x1, #UARTFR]
tbnz w2, #PL011_UARTFR_TXFF_BIT, 2b
str w0, [x1, #UARTDR]
ret
putc_error:
mov w0, #-1
ret
endfunc console_core_putc
/* ---------------------------------------------
* int console_core_getc(uintptr_t base_addr)
* Function to get a character from the console.
* It returns the character grabbed on success
* or -1 on error.
* In : x0 - console base address
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_getc
cbz x0, getc_error
1:
/* Check if the receive FIFO is empty */
ldr w1, [x0, #UARTFR]
tbnz w1, #PL011_UARTFR_RXFE_BIT, 1b
ldr w1, [x0, #UARTDR]
mov w0, w1
ret
getc_error:
mov w0, #-1
ret
endfunc console_core_getc
......@@ -206,7 +206,7 @@ void tzc400_configure_region(unsigned int filters,
* Do address range check based on TZC configuration. A 64bit address is
* the max and expected case.
*/
assert(((region_top <= (UINT64_MAX >> (64 - tzc400.addr_width))) &&
assert(((region_top <= _tzc_get_max_top_addr(tzc400.addr_width)) &&
(region_base < region_top)));
/* region_base and (region_top + 1) must be 4KB aligned */
......
......@@ -28,6 +28,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <mmio.h>
#include <tzc_common.h>
......@@ -199,4 +201,35 @@ static unsigned int _tzc_read_peripheral_id(uintptr_t base)
return id;
}
#ifdef AARCH32
static unsigned long long _tzc_get_max_top_addr(int addr_width)
{
/*
* Assume at least 32 bit wide address and initialize the max.
* This function doesn't use 64-bit integer arithmetic to avoid
* having to implement additional compiler library functions.
*/
unsigned long long addr_mask = 0xFFFFFFFF;
uint32_t *addr_ptr = (uint32_t *)&addr_mask;
assert(addr_width >= 32);
/* This logic works only on little-endian platforms */
assert((read_sctlr() & SCTLR_EE_BIT) == 0);
/*
* If required address width is greater than 32, populate the higher
* 32 bits of the 64 bit field with the max address.
*/
if (addr_width > 32)
*(addr_ptr + 1) = ((1 << (addr_width - 32)) - 1);
return addr_mask;
}
#else
#define _tzc_get_max_top_addr(addr_width)\
(UINT64_MAX >> (64 - (addr_width)))
#endif /* AARCH32 */
#endif