Commit 8545a874 authored by Andrew Thoelke

Merge pull request #102 from achingupta:ag/tf-issues#104-v2

parents 92535302 a20a81e5
......@@ -31,8 +31,6 @@
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <cm_macros.S>
.globl bl31_entrypoint
......
......@@ -30,14 +30,118 @@
#include <arch.h>
#include <asm_macros.S>
#include <cm_macros.S>
#include <context.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
.globl runtime_exceptions
.globl el3_exit
/* -----------------------------------------------------
 * Handle SMC exceptions separately from other sync.
* exceptions.
* -----------------------------------------------------
*/
.macro handle_sync_exception
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x30, #EC_AARCH32_SMC
b.eq smc_handler32
cmp x30, #EC_AARCH64_SMC
b.eq smc_handler64
/* -----------------------------------------------------
* The following code handles any synchronous exception
* that is not an SMC.
* -----------------------------------------------------
*/
bl dump_state_and_die
.endm
/* -----------------------------------------------------
* This macro handles FIQ or IRQ interrupts i.e. EL3,
* S-EL1 and NS interrupts.
* -----------------------------------------------------
*/
.macro handle_interrupt_exception label
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
bl save_gp_registers
/* Switch to the runtime stack i.e. SP_EL0 */
ldr x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
mov x20, sp
msr spsel, #0
mov sp, x2
/*
* Find out whether this is a valid interrupt type. If the
* interrupt controller reports a spurious interrupt then
* return to where we came from.
*/
bl ic_get_pending_interrupt_type
cmp x0, #INTR_TYPE_INVAL
b.eq interrupt_exit_\label
/*
* Get the registered handler for this interrupt type. A
* NULL return value implies that an interrupt was generated
* for which there is no handler registered or the interrupt
 * was routed incorrectly. This is a problem with the framework,
 * so report it as an error.
*/
bl get_interrupt_type_handler
cbz x0, interrupt_error_\label
mov x21, x0
mov x0, #INTR_ID_UNAVAILABLE
#if IMF_READ_INTERRUPT_ID
/*
* Read the id of the highest priority pending interrupt. If
* no interrupt is asserted then return to where we came from.
*/
bl ic_get_pending_interrupt_id
cmp x0, #INTR_ID_UNAVAILABLE
b.eq interrupt_exit_\label
#endif
/*
* Save the EL3 system registers needed to return from
* this exception.
*/
mrs x3, spsr_el3
mrs x4, elr_el3
stp x3, x4, [x20, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
/* Set the current security state in the 'flags' parameter */
mrs x2, scr_el3
ubfx x1, x2, #0, #1
/* Restore the reference to the 'handle' i.e. SP_EL3 */
mov x2, x20
/* Call the interrupt type handler */
blr x21
interrupt_exit_\label:
/* Return from exception, possibly in a different security state */
b el3_exit
/*
* This label signifies a problem with the interrupt management
* framework where it is not safe to go back to the instruction
* where the interrupt was generated.
*/
interrupt_error_\label:
bl dump_intr_state_and_die
.endm
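
For reference, a minimal C sketch (not part of this patch; the function name is illustrative) of the handler that 'blr x21' invokes above, matching the interrupt_type_handler_t prototype introduced in interrupt_mgmt.h:

#include <stdint.h>
#include <interrupt_mgmt.h>

/*
 * Illustrative handler. 'id' is INTR_ID_UNAVAILABLE unless the
 * IMF_READ_INTERRUPT_ID build flag is set, 'flags' Bit[0] carries the
 * security state that was interrupted and 'handle' points to the
 * 'cpu_context' saved by the exception handling code.
 */
static uint64_t example_interrupt_handler(uint32_t id, uint32_t flags,
					  void *handle, void *cookie)
{
	uint32_t interrupted_ss = get_interrupt_src_ss(flags);

	/* ... query the interrupt controller and service the interrupt ... */
	(void)id; (void)cookie; (void)interrupted_ss;

	return 0;	/* control then resumes via 'b el3_exit' */
}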
.macro save_x18_to_x29_sp_el0
stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
......@@ -140,12 +244,12 @@ sync_exception_aarch64:
* -----------------------------------------------------
*/
irq_aarch64:
bl dump_intr_state_and_die
handle_interrupt_exception irq_aarch64
check_vector_size irq_aarch64
.align 7
fiq_aarch64:
bl dump_intr_state_and_die
handle_interrupt_exception fiq_aarch64
check_vector_size fiq_aarch64
.align 7
......@@ -177,12 +281,12 @@ sync_exception_aarch32:
* -----------------------------------------------------
*/
irq_aarch32:
bl dump_intr_state_and_die
handle_interrupt_exception irq_aarch32
check_vector_size irq_aarch32
.align 7
fiq_aarch32:
bl dump_intr_state_and_die
handle_interrupt_exception fiq_aarch32
check_vector_size fiq_aarch32
.align 7
......
......@@ -31,6 +31,7 @@
BL31_SOURCES += bl31/bl31_main.c \
bl31/context_mgmt.c \
bl31/runtime_svc.c \
bl31/interrupt_mgmt.c \
bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
......@@ -50,3 +51,11 @@ BL31_SOURCES += bl31/bl31_main.c \
services/std_svc/psci/psci_setup.c
BL31_LINKERFILE := bl31/bl31.ld.S
# Flag used by the generic interrupt management framework to determine
# whether, upon the assertion of an interrupt, it should pass the interrupt
# id to the handler or not
IMF_READ_INTERRUPT_ID := 0
$(eval $(call assert_boolean,IMF_READ_INTERRUPT_ID))
$(eval $(call add_define,IMF_READ_INTERRUPT_ID))
......@@ -28,12 +28,14 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
......@@ -145,10 +147,10 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
}
/*******************************************************************************
* This function function populates 'cpu_context' pertaining to the given
* security state with the entrypoint, SPSR and SCR values so that an ERET from
* this securit state correctly restores corresponding values to drop the CPU to
* the next exception level
* This function populates 'cpu_context' pertaining to the given security state
* with the entrypoint, SPSR and SCR values so that an ERET from this security
* state correctly restores corresponding values to drop the CPU to the next
* exception level
******************************************************************************/
void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
uint32_t spsr, uint32_t scr)
......@@ -159,6 +161,11 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
ctx = cm_get_context(read_mpidr(), security_state);
assert(ctx);
/* Program the interrupt routing model for this security state */
scr &= ~SCR_FIQ_BIT;
scr &= ~SCR_IRQ_BIT;
scr |= get_scr_el3_from_routing_model(security_state);
/* Populate EL3 state so that we have the right context before doing ERET */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_SPSR_EL3, spsr);
......@@ -167,10 +174,10 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
}
/*******************************************************************************
* This function function populates ELR_EL3 member of 'cpu_context' pertaining
* to the given security state with the given entrypoint
* This function populates ELR_EL3 member of 'cpu_context' pertaining to the
* given security state with the given entrypoint
******************************************************************************/
void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint)
void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
{
cpu_context_t *ctx;
el3_state_t *state;
......@@ -183,6 +190,56 @@ void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint)
write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}
/*******************************************************************************
* This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
* pertaining to the given security state using the value and bit position
* specified in the parameters. It preserves all other bits.
******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
uint32_t bit_pos,
uint32_t value)
{
cpu_context_t *ctx;
el3_state_t *state;
uint32_t scr_el3;
ctx = cm_get_context(read_mpidr(), security_state);
assert(ctx);
/* Ensure that the bit position is a valid one */
assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
/* Ensure that the 'value' is only a bit wide */
assert(value <= 1);
/*
* Get the SCR_EL3 value from the cpu context, clear the desired bit
* and set it to its new value.
*/
state = get_el3state_ctx(ctx);
scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
scr_el3 &= ~(1 << bit_pos);
scr_el3 |= value << bit_pos;
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
/*******************************************************************************
* This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
* given security state.
******************************************************************************/
uint32_t cm_get_scr_el3(uint32_t security_state)
{
cpu_context_t *ctx;
el3_state_t *state;
ctx = cm_get_context(read_mpidr(), security_state);
assert(ctx);
/* Get the copy of SCR_EL3 from the EL3 state of this context */
state = get_el3state_ctx(ctx);
return read_ctx_reg(state, CTX_SCR_EL3);
}
/*******************************************************************************
* This function is used to program the context that's used for exception
* return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
......
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <errno.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <stdio.h>
/*******************************************************************************
* Local structure and corresponding array to keep track of the state of the
* registered interrupt handlers for each interrupt type.
* The field descriptions are:
*
* 'flags' : Bit[0], Routing model for this interrupt type when execution is
* not in EL3 in the secure state. '1' implies that this
* interrupt will be routed to EL3. '0' implies that this
* interrupt will be routed to the current exception level.
*
* Bit[1], Routing model for this interrupt type when execution is
* not in EL3 in the non-secure state. '1' implies that this
* interrupt will be routed to EL3. '0' implies that this
* interrupt will be routed to the current exception level.
*
* All other bits are reserved and SBZ.
*
* 'scr_el3[2]' : Mapping of the routing model in the 'flags' field to the
* value of the SCR_EL3.IRQ or FIQ bit for each security state.
* There are two instances of this field corresponding to the
* two security states.
******************************************************************************/
typedef struct intr_type_desc {
interrupt_type_handler_t handler;
uint32_t flags;
uint32_t scr_el3[2];
} intr_type_desc_t;
static intr_type_desc_t intr_type_descs[MAX_INTR_TYPES];
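
As a worked example of the 'flags' layout described above (a sketch using the helpers from interrupt_mgmt.h; the function name is illustrative):

#include <stdint.h>
#include <interrupt_mgmt.h>

/*
 * Build a routing model where this interrupt type is routed to EL3 only
 * when generated in the non-secure state: Bit[1] = 1, Bit[0] = 0.
 */
static uint32_t example_routing_flags(void)
{
	uint32_t flags = 0;

	set_interrupt_rm_flag(flags, NON_SECURE);	/* sets Bit[1] */
	/* flags is now 0x2, i.e. the INTR_SEL1_VALID_RM0 model */
	return flags;
}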
/*******************************************************************************
* This function validates the interrupt type. EL3 interrupts are currently not
* supported.
******************************************************************************/
static int32_t validate_interrupt_type(uint32_t type)
{
if (type == INTR_TYPE_EL3)
return -ENOTSUP;
if (type != INTR_TYPE_S_EL1 && type != INTR_TYPE_NS)
return -EINVAL;
return 0;
}
/*******************************************************************************
* This function validates the routing model for this type of interrupt
******************************************************************************/
static int32_t validate_routing_model(uint32_t type, uint32_t flags)
{
flags >>= INTR_RM_FLAGS_SHIFT;
flags &= INTR_RM_FLAGS_MASK;
if (type == INTR_TYPE_S_EL1)
return validate_sel1_interrupt_rm(flags);
if (type == INTR_TYPE_NS)
return validate_ns_interrupt_rm(flags);
return -EINVAL;
}
/*******************************************************************************
* This function returns the cached copy of the SCR_EL3 which contains the
* routing model (expressed through the IRQ and FIQ bits) for a security state
* which was stored through a call to 'set_routing_model()' earlier.
******************************************************************************/
uint32_t get_scr_el3_from_routing_model(uint32_t security_state)
{
uint32_t scr_el3;
assert(security_state <= NON_SECURE);
scr_el3 = intr_type_descs[INTR_TYPE_NS].scr_el3[security_state];
scr_el3 |= intr_type_descs[INTR_TYPE_S_EL1].scr_el3[security_state];
scr_el3 |= intr_type_descs[INTR_TYPE_EL3].scr_el3[security_state];
return scr_el3;
}
/*******************************************************************************
* This function uses the 'interrupt_type_flags' parameter to obtain the value
* of the trap bit (IRQ/FIQ) in the SCR_EL3 for a security state for this
 * interrupt type, and uses that value to update the SCR_EL3 in the cpu context
 * and the 'intr_type_desc' for that security state.
******************************************************************************/
static void set_scr_el3_from_rm(uint32_t type,
uint32_t interrupt_type_flags,
uint32_t security_state)
{
uint32_t flag, bit_pos;
flag = get_interrupt_rm_flag(interrupt_type_flags, security_state);
bit_pos = plat_interrupt_type_to_line(type, security_state);
intr_type_descs[type].scr_el3[security_state] = flag << bit_pos;
cm_write_scr_el3_bit(security_state, bit_pos, flag);
}
/*******************************************************************************
* This function validates the routing model specified in the 'flags' and
* updates internal data structures to reflect the new routing model. It also
* updates the copy of SCR_EL3 for each security state with the new routing
* model in the 'cpu_context' structure for this cpu.
******************************************************************************/
int32_t set_routing_model(uint32_t type, uint32_t flags)
{
int32_t rc;
rc = validate_interrupt_type(type);
if (rc)
return rc;
rc = validate_routing_model(type, flags);
if (rc)
return rc;
/* Update the routing model in internal data structures */
intr_type_descs[type].flags = flags;
set_scr_el3_from_rm(type, flags, SECURE);
set_scr_el3_from_rm(type, flags, NON_SECURE);
return 0;
}
/*******************************************************************************
* This function registers a handler for the 'type' of interrupt specified. It
* also validates the routing model specified in the 'flags' for this type of
* interrupt.
******************************************************************************/
int32_t register_interrupt_type_handler(uint32_t type,
interrupt_type_handler_t handler,
uint32_t flags)
{
int32_t rc;
/* Validate the 'handler' parameter */
if (!handler)
return -EINVAL;
/* Validate the 'flags' parameter */
if (flags & INTR_TYPE_FLAGS_MASK)
return -EINVAL;
/* Check if a handler has already been registered */
if (intr_type_descs[type].handler)
return -EALREADY;
rc = set_routing_model(type, flags);
if (rc)
return rc;
/* Save the handler */
intr_type_descs[type].handler = handler;
return 0;
}
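
A hypothetical caller, e.g. a secure payload dispatcher, might register a handler as follows (sketch only; the handler is a stub and its name is an assumption):

#include <stdint.h>
#include <interrupt_mgmt.h>

static uint64_t spd_sel1_interrupt_handler(uint32_t id, uint32_t flags,
					   void *handle, void *cookie)
{
	/* ... hand the interrupt over to the secure payload ... */
	return 0;
}

static int32_t example_register(void)
{
	uint32_t flags = 0;

	/* Route S-EL1 interrupts to EL3 only from the non-secure state */
	set_interrupt_rm_flag(flags, NON_SECURE);

	/* Returns 0 on success, -EINVAL/-EALREADY/-ENOTSUP otherwise */
	return register_interrupt_type_handler(INTR_TYPE_S_EL1,
					       spd_sel1_interrupt_handler,
					       flags);
}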
/*******************************************************************************
* This function is called when an interrupt is generated and returns the
* handler for the interrupt type (if registered). It returns NULL if the
* interrupt type is not supported or its handler has not been registered.
******************************************************************************/
interrupt_type_handler_t get_interrupt_type_handler(uint32_t type)
{
if (validate_interrupt_type(type))
return NULL;
return intr_type_descs[type].handler;
}
......@@ -39,6 +39,7 @@
.globl tsp_cpu_suspend_entry
.globl tsp_cpu_resume_entry
.globl tsp_fast_smc_entry
.globl tsp_fiq_entry
/* ---------------------------------------------
* Populate the params in x0-x7 from the pointer
......@@ -53,6 +54,22 @@
smc #0
.endm
.macro save_eret_context reg1 reg2
mrs \reg1, elr_el1
mrs \reg2, spsr_el1
stp \reg1, \reg2, [sp, #-0x10]!
stp x30, x18, [sp, #-0x10]!
.endm
.macro restore_eret_context reg1 reg2
ldp x30, x18, [sp], #0x10
ldp \reg1, \reg2, [sp], #0x10
msr elr_el1, \reg1
msr spsr_el1, \reg2
.endm
.section .text, "ax"
.align 3
func tsp_entrypoint
......@@ -70,7 +87,7 @@ func tsp_entrypoint
* Set the exception vector to something sane.
* ---------------------------------------------
*/
adr x0, early_exceptions
adr x0, tsp_exceptions
msr vbar_el1, x0
/* ---------------------------------------------
......@@ -167,7 +184,7 @@ func tsp_cpu_on_entry
* Set the exception vector to something sane.
* ---------------------------------------------
*/
adr x0, early_exceptions
adr x0, tsp_exceptions
msr vbar_el1, x0
/* ---------------------------------------------
......@@ -226,6 +243,58 @@ func tsp_cpu_suspend_entry
bl tsp_cpu_suspend_main
restore_args_call_smc
/*---------------------------------------------
* This entrypoint is used by the TSPD to pass
* control for handling a pending S-EL1 FIQ.
* 'x0' contains a magic number which indicates
* this. TSPD expects control to be handed back
* at the end of FIQ processing. This is done
* through an SMC. The handover agreement is:
*
* 1. PSTATE.DAIF are set upon entry. 'x1' has
* the ELR_EL3 from the non-secure state.
* 2. TSP has to preserve the callee saved
* general purpose registers, SP_EL1/EL0 and
* LR.
* 3. TSP has to preserve the system and vfp
* registers (if applicable).
* 4. TSP can use 'x0-x18' to enable its C
* runtime.
* 5. TSP returns to TSPD using an SMC with
* 'x0' = TSP_HANDLED_S_EL1_FIQ
* ---------------------------------------------
*/
func tsp_fiq_entry
#if DEBUG
mov x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
movk x2, #(TSP_HANDLE_FIQ_AND_RETURN & 0xffff)
cmp x0, x2
b.ne tsp_fiq_entry_panic
#endif
/*---------------------------------------------
* Save any previous context needed to perform
* an exception return from S-EL1 e.g. context
* from a previous IRQ. Update statistics and
* handle the FIQ before returning to the TSPD.
 * IRQ/FIQs are not enabled since that would
 * complicate the implementation. Execution
* will be transferred back to the normal world
* in any case. A non-zero return value from the
* fiq handler is an error.
* ---------------------------------------------
*/
save_eret_context x2 x3
bl tsp_update_sync_fiq_stats
bl tsp_fiq_handler
cbnz x0, tsp_fiq_entry_panic
restore_eret_context x2 x3
mov x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
movk x0, #(TSP_HANDLED_S_EL1_FIQ & 0xffff)
smc #0
tsp_fiq_entry_panic:
b tsp_fiq_entry_panic
/*---------------------------------------------
* This entrypoint is used by the TSPD when this
* cpu resumes execution after an earlier
......
......@@ -27,31 +27,174 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl_common.h>
#include <arch.h>
#include <context.h>
#include <tsp.h>
#include <asm_macros.S>
/* ----------------------------------------------------
* The caller-saved registers x0-x18 and LR are saved
* here.
* ----------------------------------------------------
*/
#define SCRATCH_REG_SIZE #(20 * 8)
.macro save_caller_regs_and_lr
sub sp, sp, SCRATCH_REG_SIZE
stp x0, x1, [sp]
stp x2, x3, [sp, #0x10]
stp x4, x5, [sp, #0x20]
stp x6, x7, [sp, #0x30]
stp x8, x9, [sp, #0x40]
stp x10, x11, [sp, #0x50]
stp x12, x13, [sp, #0x60]
stp x14, x15, [sp, #0x70]
stp x16, x17, [sp, #0x80]
stp x18, x30, [sp, #0x90]
.endm
.macro restore_caller_regs_and_lr
ldp x0, x1, [sp]
ldp x2, x3, [sp, #0x10]
ldp x4, x5, [sp, #0x20]
ldp x6, x7, [sp, #0x30]
ldp x8, x9, [sp, #0x40]
ldp x10, x11, [sp, #0x50]
ldp x12, x13, [sp, #0x60]
ldp x14, x15, [sp, #0x70]
ldp x16, x17, [sp, #0x80]
ldp x18, x30, [sp, #0x90]
add sp, sp, SCRATCH_REG_SIZE
.endm
.globl tsp_exceptions
/* -----------------------------------------------------
 * Handle SMC exceptions separately from other sync.
* exceptions.
* TSP exception handlers.
* -----------------------------------------------------
*/
.macro handle_sync_exception
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
.section .vectors, "ax"; .align 11
cmp x30, #EC_AARCH32_SMC
b.eq smc_handler32
.align 7
tsp_exceptions:
/* -----------------------------------------------------
 * Current EL with _sp_el0 : 0x0 - 0x180. No exceptions
 * are expected; any that occur are treated as irrecoverable errors.
* -----------------------------------------------------
*/
sync_exception_sp_el0:
wfi
b sync_exception_sp_el0
check_vector_size sync_exception_sp_el0
.align 7
irq_sp_el0:
b irq_sp_el0
check_vector_size irq_sp_el0
.align 7
fiq_sp_el0:
b fiq_sp_el0
check_vector_size fiq_sp_el0
.align 7
serror_sp_el0:
b serror_sp_el0
check_vector_size serror_sp_el0
cmp x30, #EC_AARCH64_SMC
b.eq smc_handler64
/* -----------------------------------------------------
* The following code handles any synchronous exception
* that is not an SMC.
* Current EL with SPx: 0x200 - 0x380. Only IRQs/FIQs
* are expected and handled
* -----------------------------------------------------
*/
.align 7
sync_exception_sp_elx:
wfi
b sync_exception_sp_elx
check_vector_size sync_exception_sp_elx
bl dump_state_and_die
.endm
.align 7
irq_sp_elx:
b irq_sp_elx
check_vector_size irq_sp_elx
.align 7
fiq_sp_elx:
save_caller_regs_and_lr
bl tsp_fiq_handler
cbz x0, fiq_sp_elx_done
/*
 * This FIQ was not targeted at S-EL1 so send it to
* the monitor and wait for execution to resume.
*/
smc #0
fiq_sp_elx_done:
restore_caller_regs_and_lr
eret
check_vector_size fiq_sp_elx
.align 7
serror_sp_elx:
b serror_sp_elx
check_vector_size serror_sp_elx
/* -----------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x580. No exceptions
 * are handled since the TSP does not implement a lower EL
* -----------------------------------------------------
*/
.align 7
sync_exception_aarch64:
wfi
b sync_exception_aarch64
check_vector_size sync_exception_aarch64
.align 7
irq_aarch64:
b irq_aarch64
check_vector_size irq_aarch64
.align 7
fiq_aarch64:
b fiq_aarch64
check_vector_size fiq_aarch64
.align 7
serror_aarch64:
b serror_aarch64
check_vector_size serror_aarch64
/* -----------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x780. No exceptions
 * are handled since the TSP does not implement a lower EL.
* -----------------------------------------------------
*/
.align 7
sync_exception_aarch32:
wfi
b sync_exception_aarch32
check_vector_size sync_exception_aarch32
.align 7
irq_aarch32:
b irq_aarch32
check_vector_size irq_aarch32
.align 7
fiq_aarch32:
b fiq_aarch32
check_vector_size fiq_aarch32
.align 7
serror_aarch32:
b serror_aarch32
check_vector_size serror_aarch32
.align 7
......@@ -29,7 +29,9 @@
#
# TSP source files specific to FVP platform
BL32_SOURCES += plat/common/aarch64/platform_mp_stack.S \
plat/fvp/bl32_plat_setup.c \
BL32_SOURCES += drivers/arm/gic/gic_v2.c \
plat/common/aarch64/platform_mp_stack.S \
plat/fvp/aarch64/plat_common.c \
plat/fvp/aarch64/plat_helpers.S
plat/fvp/aarch64/plat_helpers.S \
plat/fvp/bl32_plat_setup.c \
plat/fvp/plat_gic.c
......@@ -30,7 +30,10 @@
BL32_SOURCES += bl32/tsp/tsp_main.c \
bl32/tsp/aarch64/tsp_entrypoint.S \
bl32/tsp/aarch64/tsp_exceptions.S \
bl32/tsp/aarch64/tsp_request.S \
bl32/tsp/tsp_interrupt.c \
bl32/tsp/tsp_timer.c \
common/aarch64/early_exceptions.S \
lib/locks/exclusive/spinlock.S
......
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <gic_v2.h>
#include <tsp.h>
#include <platform.h>
/*******************************************************************************
 * This function updates the TSP statistics for FIQs handled synchronously,
 * i.e. the ones that have been handed over by the TSPD. It also keeps count
 * of the number of times control was passed back to the TSPD after handling
 * an FIQ. In the future the TSPD may hand over an FIQ to the TSP without
 * expecting it to return execution, so this statistic will be useful to
 * distinguish between the two models of synchronous FIQ handling.
* The 'elr_el3' parameter contains the address of the instruction in normal
* world where this FIQ was generated.
******************************************************************************/
void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
tsp_stats[linear_id].sync_fiq_count++;
if (type == TSP_HANDLE_FIQ_AND_RETURN)
tsp_stats[linear_id].sync_fiq_ret_count++;
spin_lock(&console_lock);
printf("TSP: cpu 0x%x sync fiq request from 0x%llx \n\r",
mpidr, elr_el3);
INFO("cpu 0x%x: %d sync fiq requests, %d sync fiq returns\n",
mpidr,
tsp_stats[linear_id].sync_fiq_count,
tsp_stats[linear_id].sync_fiq_ret_count);
spin_unlock(&console_lock);
}
/*******************************************************************************
* TSP FIQ handler called as a part of both synchronous and asynchronous
 * handling of FIQ interrupts. It returns 0 upon successfully handling an S-EL1
 * FIQ and treats all other FIQs as EL3 interrupts. It assumes that the GIC
 * architecture version is v2.0 and the secure physical timer interrupt is the
* only S-EL1 interrupt that it needs to handle.
******************************************************************************/
int32_t tsp_fiq_handler()
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), id;
/*
* Get the highest priority pending interrupt id and see if it is the
* secure physical generic timer interrupt in which case, handle it.
* Otherwise throw this interrupt at the EL3 firmware.
*/
id = ic_get_pending_interrupt_id();
/* TSP can only handle the secure physical timer interrupt */
if (id != IRQ_SEC_PHY_TIMER)
return TSP_EL3_FIQ;
/*
* Handle the interrupt. Also sanity check if it has been preempted by
* another secure interrupt through an assertion.
*/
id = ic_acknowledge_interrupt();
assert(id == IRQ_SEC_PHY_TIMER);
tsp_generic_timer_handler();
ic_end_of_interrupt(id);
/* Update the statistics and print some messages */
tsp_stats[linear_id].fiq_count++;
spin_lock(&console_lock);
printf("TSP: cpu 0x%x handled fiq %d \n\r",
mpidr, id);
INFO("cpu 0x%x: %d fiq requests \n",
mpidr, tsp_stats[linear_id].fiq_count);
spin_unlock(&console_lock);
return 0;
}
......@@ -58,7 +58,7 @@ static tsp_args_t tsp_smc_args[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Per cpu data structure to keep track of TSP activity
******************************************************************************/
static work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Single reference to the various entry points exported by the test secure
......@@ -71,6 +71,7 @@ static const entry_info_t tsp_entry_info = {
tsp_cpu_off_entry,
tsp_cpu_resume_entry,
tsp_cpu_suspend_entry,
tsp_fiq_entry,
};
......@@ -127,6 +128,7 @@ uint64_t tsp_main(void)
bl32_platform_setup();
/* Initialize secure/applications state here */
tsp_generic_timer_start();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
......@@ -162,6 +164,9 @@ tsp_args_t *tsp_cpu_on_main(void)
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
/* Initialize secure/applications state here */
tsp_generic_timer_start();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
......@@ -195,6 +200,13 @@ tsp_args_t *tsp_cpu_off_main(uint64_t arg0,
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
/*
* This cpu is being turned off, so disable the timer to prevent the
 * secure timer interrupt from interfering with power down. A pending
 * interrupt will be lost but that is acceptable since the cpu is being
 * turned off.
*/
tsp_generic_timer_stop();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
......@@ -230,6 +242,13 @@ tsp_args_t *tsp_cpu_suspend_main(uint64_t power_state,
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
/*
 * Save the timer context and disable it to prevent the secure timer
* interrupt from interfering with wakeup from the suspend state.
*/
tsp_generic_timer_save();
tsp_generic_timer_stop();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
......@@ -265,6 +284,9 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
/* Restore the generic timer context */
tsp_generic_timer_restore();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
......
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <tsp.h>
/*******************************************************************************
* Data structure to keep track of per-cpu secure generic timer context across
* power management operations.
******************************************************************************/
typedef struct timer_context {
uint64_t cval;
uint32_t ctl;
} timer_context_t;
static timer_context_t pcpu_timer_context[PLATFORM_CORE_COUNT];
/*******************************************************************************
 * This function initializes the generic timer to fire every half second
******************************************************************************/
void tsp_generic_timer_start()
{
uint64_t cval;
uint32_t ctl = 0;
/* The timer will fire every half second */
cval = read_cntpct_el0() + (read_cntfrq_el0() >> 1);
write_cntps_cval_el1(cval);
/* Enable the secure physical timer */
set_cntp_ctl_enable(ctl);
write_cntps_ctl_el1(ctl);
}
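
To make the deadline arithmetic concrete (a sketch; the 100 MHz counter frequency is an assumed example value):

#include <stdint.h>

static uint64_t half_second_deadline(uint64_t cntpct, uint64_t cntfrq)
{
	/* CNTFRQ_EL0 holds ticks per second, so half of it is 0.5s:
	 * e.g. cntfrq == 100000000 gives cntpct + 50000000 ticks. */
	return cntpct + (cntfrq >> 1);
}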
/*******************************************************************************
* This function deasserts the timer interrupt and sets it up again
******************************************************************************/
void tsp_generic_timer_handler()
{
/* Ensure that the timer did assert the interrupt */
assert(get_cntp_ctl_istatus(read_cntps_ctl_el1()));
/* Disable the timer and reprogram it */
write_cntps_ctl_el1(0);
tsp_generic_timer_start();
}
/*******************************************************************************
* This function deasserts the timer interrupt prior to cpu power down
******************************************************************************/
void tsp_generic_timer_stop()
{
/* Disable the timer */
write_cntps_ctl_el1(0);
}
/*******************************************************************************
* This function saves the timer context prior to cpu suspension
******************************************************************************/
void tsp_generic_timer_save()
{
uint32_t linear_id = platform_get_core_pos(read_mpidr());
pcpu_timer_context[linear_id].cval = read_cntps_cval_el1();
pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1();
flush_dcache_range((uint64_t) &pcpu_timer_context[linear_id],
sizeof(pcpu_timer_context[linear_id]));
}
/*******************************************************************************
 * This function restores the timer context after cpu resumption
******************************************************************************/
void tsp_generic_timer_restore()
{
uint32_t linear_id = platform_get_core_pos(read_mpidr());
write_cntps_cval_el1(pcpu_timer_context[linear_id].cval);
write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl);
}
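
Taken together, a sketch of how the power management hooks earlier in this change pair these helpers (mirroring tsp_cpu_suspend_main and tsp_cpu_resume_main above):

#include <tsp.h>

static void example_suspend_path(void)
{
	tsp_generic_timer_save();	/* capture CNTPS_CVAL/CTL for this cpu */
	tsp_generic_timer_stop();	/* prevent further secure timer interrupts */
}

static void example_resume_path(void)
{
	tsp_generic_timer_restore();	/* reprogram the saved deadline */
}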
......@@ -158,6 +158,15 @@ performed.
* `V`: Verbose build. If assigned anything other than 0, the build commands
are printed. Default is 0
* `FVP_GIC_ARCH`: Choice of ARM GIC architecture version used by the FVP port
for implementing the platform GIC API. This API is used by the interrupt
management framework. Default is 2, i.e. version 2.0.
* `IMF_READ_INTERRUPT_ID`: Boolean flag used by the interrupt management
framework to enable passing of the interrupt id to its handler. The id is
read using a platform GIC API. `INTR_ID_UNAVAILABLE` is passed instead if
this option is set to 0. Default is 0.
### Creating a Firmware Image Package
FIPs are automatically created as part of the build instructions described in
......
......@@ -28,8 +28,10 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <assert.h>
#include <gic_v2.h>
#include <interrupt_mgmt.h>
#include <mmio.h>
/*******************************************************************************
......@@ -290,3 +292,27 @@ void gicd_set_itargetsr(unsigned int base, unsigned int id, unsigned int iface)
(1 << iface) << (byte_off << 3));
}
/*******************************************************************************
* This function allows the interrupt management framework to determine (through
* the platform) which interrupt line (IRQ/FIQ) to use for an interrupt type to
* route it to EL3. The interrupt line is represented as the bit position of the
* IRQ or FIQ bit in the SCR_EL3.
******************************************************************************/
uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type)
{
uint32_t gicc_ctlr;
/* Non-secure interrupts are signalled on the IRQ line always */
if (type == INTR_TYPE_NS)
return __builtin_ctz(SCR_IRQ_BIT);
/*
 * Secure interrupts are signalled using the IRQ line if the FIQ_EN
 * bit is not set; otherwise they are signalled using the FIQ line.
*/
gicc_ctlr = gicc_read_ctlr(cpuif_base);
if (gicc_ctlr & FIQ_EN)
return __builtin_ctz(SCR_FIQ_BIT);
else
return __builtin_ctz(SCR_IRQ_BIT);
}
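
For instance, a sketch (assuming a configuration where GICC_CTLR.FIQ_EN is set, so secure interrupts are signalled as FIQs):

#include <arch.h>
#include <assert.h>
#include <gic_v2.h>
#include <interrupt_mgmt.h>

static void example_line_check(uint32_t cpuif_base)
{
	/* S-EL1 interrupts then map to SCR_EL3.FIQ, i.e. bit position 2 */
	assert(gicv2_interrupt_type_to_line(cpuif_base, INTR_TYPE_S_EL1) ==
	       __builtin_ctz(SCR_FIQ_BIT));
}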
......@@ -47,10 +47,13 @@ extern void cm_el1_sysregs_context_save(uint32_t security_state);
extern void cm_el1_sysregs_context_restore(uint32_t security_state);
extern void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
uint32_t spsr, uint32_t scr);
extern void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint);
extern void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint);
extern void cm_write_scr_el3_bit(uint32_t security_state,
uint32_t bit_pos,
uint32_t value);
extern void cm_set_next_eret_context(uint32_t security_state);
extern void cm_init_pcpu_ptr_cache();
extern void cm_set_pcpu_ptr_cache(const void *pcpu_ptr);
extern void *cm_get_pcpu_ptr_cache(void);
extern uint32_t cm_get_scr_el3(uint32_t security_state);
#endif /* __CM_H__ */
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __INTERRUPT_MGMT_H__
#define __INTERRUPT_MGMT_H__
#include <arch.h>
/*******************************************************************************
* Constants for the types of interrupts recognised by the IM framework
******************************************************************************/
#define INTR_TYPE_S_EL1 0
#define INTR_TYPE_EL3 1
#define INTR_TYPE_NS 2
#define MAX_INTR_TYPES 3
#define INTR_TYPE_INVAL MAX_INTR_TYPES
/*
* Constant passed to the interrupt handler in the 'id' field when the
 * framework does not read the GIC registers to determine the interrupt id.
*/
#define INTR_ID_UNAVAILABLE 0xFFFFFFFF
/*******************************************************************************
* Mask for _both_ the routing model bits in the 'flags' parameter and
* constants to define the valid routing models for each supported interrupt
* type
******************************************************************************/
#define INTR_RM_FLAGS_SHIFT 0x0
#define INTR_RM_FLAGS_MASK 0x3
/* Routed to EL3 from NS. Taken to S-EL1 from Secure */
#define INTR_SEL1_VALID_RM0 0x2
/* Routed to EL3 from NS and Secure */
#define INTR_SEL1_VALID_RM1 0x3
/* Routed to EL1/EL2 from NS and to S-EL1 from Secure */
#define INTR_NS_VALID_RM0 0x0
/* Routed to EL1/EL2 from NS and to EL3 from Secure */
#define INTR_NS_VALID_RM1 0x1
/*******************************************************************************
* Constants for the _individual_ routing model bits in the 'flags' field for
* each interrupt type and mask to validate the 'flags' parameter while
* registering an interrupt handler
******************************************************************************/
#define INTR_TYPE_FLAGS_MASK 0xFFFFFFFC
#define INTR_RM_FROM_SEC_SHIFT SECURE /* BIT[0] */
#define INTR_RM_FROM_NS_SHIFT NON_SECURE /* BIT[1] */
#define INTR_RM_FROM_FLAG_MASK 1
#define get_interrupt_rm_flag(flag, ss) (((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \
& INTR_RM_FROM_FLAG_MASK)
#define set_interrupt_rm_flag(flag, ss) (flag |= 1 << ss)
#define clr_interrupt_rm_flag(flag, ss) (flag &= ~(1 << ss))
/*******************************************************************************
* Macros to validate the routing model bits in the 'flags' for a type
* of interrupt. If the model does not match one of the valid masks
* -EINVAL is returned.
******************************************************************************/
#define validate_sel1_interrupt_rm(x) (x == INTR_SEL1_VALID_RM0 ? 0 : \
(x == INTR_SEL1_VALID_RM1 ? 0 :\
-EINVAL))
#define validate_ns_interrupt_rm(x) (x == INTR_NS_VALID_RM0 ? 0 : \
(x == INTR_NS_VALID_RM1 ? 0 :\
-EINVAL))
/*******************************************************************************
* Macros to set the 'flags' parameter passed to an interrupt type handler. Only
* the flag to indicate the security state when the exception was generated is
* supported.
******************************************************************************/
#define INTR_SRC_SS_FLAG_SHIFT 0 /* BIT[0] */
#define INTR_SRC_SS_FLAG_MASK 1
#define set_interrupt_src_ss(flag, val) (flag |= val << INTR_SRC_SS_FLAG_SHIFT)
#define clr_interrupt_src_ss(flag) (flag &= ~(1 << INTR_SRC_SS_FLAG_SHIFT))
#define get_interrupt_src_ss(flag) ((flag >> INTR_SRC_SS_FLAG_SHIFT) & \
INTR_SRC_SS_FLAG_MASK)
#ifndef __ASSEMBLY__
/* Prototype for defining a handler for an interrupt type */
typedef uint64_t (*interrupt_type_handler_t)(uint32_t id,
uint32_t flags,
void *handle,
void *cookie);
/*******************************************************************************
* Function & variable prototypes
******************************************************************************/
extern uint32_t get_scr_el3_from_routing_model(uint32_t security_state);
extern int32_t set_routing_model(uint32_t type, uint32_t flags);
extern int32_t register_interrupt_type_handler(uint32_t type,
interrupt_type_handler_t handler,
uint32_t flags);
extern interrupt_type_handler_t get_interrupt_type_handler(uint32_t interrupt_type);
#endif /*__ASSEMBLY__*/
#endif /* __INTERRUPT_MGMT_H__ */
......@@ -135,9 +135,12 @@
typedef int32_t (*rt_svc_init_t)(void);
/* Convenience macros to return from SMC handler */
#define SMC_RET0(_h) { \
return (uint64_t) (_h); \
}
#define SMC_RET1(_h, _x0) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
return _x0; \
SMC_RET0(_h); \
}
#define SMC_RET2(_h, _x0, _x1) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
......
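
A sketch of the effect on a runtime service handler (the signature follows the runtime service handler convention; the body is hypothetical): after this change SMC_RET1 writes x0 into the saved register context and returns the context handle via SMC_RET0, instead of returning the raw value.

#include <stdint.h>
#include <runtime_svc.h>

static uint64_t example_smc_handler(uint32_t smc_fid, uint64_t x1,
				    uint64_t x2, uint64_t x3, uint64_t x4,
				    void *cookie, void *handle, uint64_t flags)
{
	/* Store 0 in the context's saved x0 and return 'handle' for el3_exit */
	SMC_RET1(handle, 0);
}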
......@@ -42,7 +42,16 @@
#define TSP_RESUME_DONE 0xf2000004
#define TSP_WORK_DONE 0xf2000005
/* SMC function ID that TSP uses to request service from secure montior */
/*
* Function identifiers to handle FIQs through the synchronous handling model.
 * If the TSP was previously interrupted then control has to be returned to
 * the TSPD after handling the interrupt; otherwise execution can remain in
 * the TSP.
*/
#define TSP_HANDLED_S_EL1_FIQ 0xf2000006
#define TSP_EL3_FIQ 0xf2000007
#define TSP_HANDLE_FIQ_AND_RETURN 0x2004
/* SMC function ID that TSP uses to request service from secure monitor */
#define TSP_GET_ARGS 0xf2001000
/* Function IDs for various TSP services */
......@@ -86,6 +95,7 @@
#include <cassert.h>
#include <platform.h> /* For CACHE_WRITEBACK_GRANULE */
#include <spinlock.h>
#include <stdint.h>
typedef void (*tsp_generic_fptr_t)(uint64_t arg0,
......@@ -103,9 +113,13 @@ typedef struct entry_info {
tsp_generic_fptr_t cpu_off_entry;
tsp_generic_fptr_t cpu_resume_entry;
tsp_generic_fptr_t cpu_suspend_entry;
tsp_generic_fptr_t fiq_entry;
} entry_info_t;
typedef struct work_statistics {
uint32_t fiq_count; /* Number of FIQs on this cpu */
uint32_t sync_fiq_count; /* Number of sync. fiqs on this cpu */
uint32_t sync_fiq_ret_count; /* Number of fiq returns on this cpu */
uint32_t smc_count; /* Number of returns on this cpu */
uint32_t eret_count; /* Number of entries on this cpu */
uint32_t cpu_on_count; /* Number of cpu on requests */
......@@ -120,7 +134,7 @@ typedef struct tsp_args {
/* Macros to access members of the above structure using their offsets */
#define read_sp_arg(args, offset) ((args)->_regs[offset >> 3])
#define write_sp_arg(args, offset, val)(((args)->_regs[offset >> 3]) \
#define write_sp_arg(args, offset, val) (((args)->_regs[offset >> 3]) \
= val)
/*
......@@ -131,6 +145,14 @@ CASSERT(TSP_ARGS_SIZE == sizeof(tsp_args_t), assert_sp_args_size_mismatch);
extern void tsp_get_magic(uint64_t args[4]);
extern void tsp_fiq_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
uint64_t arg3,
uint64_t arg4,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
extern void tsp_fast_smc_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
......@@ -196,6 +218,20 @@ extern tsp_args_t *tsp_cpu_off_main(uint64_t arg0,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
/* Generic Timer functions */
extern void tsp_generic_timer_start(void);
extern void tsp_generic_timer_handler(void);
extern void tsp_generic_timer_stop(void);
extern void tsp_generic_timer_save(void);
extern void tsp_generic_timer_restore(void);
/* FIQ management functions */
extern void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3);
/* Data structure to keep track of TSP statistics */
extern spinlock_t console_lock;
extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
#endif /* __ASSEMBLY__ */
#endif /* __BL2_H__ */
......@@ -43,6 +43,7 @@
#define GIC_LOWEST_SEC_PRIORITY 127
#define GIC_HIGHEST_NS_PRIORITY 128
#define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */
#define GIC_SPURIOUS_INTERRUPT 1023
#define ENABLE_GRP0 (1 << 0)
#define ENABLE_GRP1 (1 << 1)
......@@ -88,6 +89,7 @@
#define GICC_EOIR 0x10
#define GICC_RPR 0x14
#define GICC_HPPIR 0x18
#define GICC_AHPPIR 0x28
#define GICC_IIDR 0xFC
#define GICC_DIR 0x1000
#define GICC_PRIODROP GICC_EOIR
......@@ -247,6 +249,11 @@ static inline unsigned int gicc_read_hppir(unsigned int base)
return mmio_read_32(base + GICC_HPPIR);
}
static inline unsigned int gicc_read_ahppir(unsigned int base)
{
return mmio_read_32(base + GICC_AHPPIR);
}
static inline unsigned int gicc_read_dir(unsigned int base)
{
return mmio_read_32(base + GICC_DIR);
......@@ -298,6 +305,12 @@ static inline void gicc_write_dir(unsigned int base, unsigned int val)
mmio_write_32(base + GICC_DIR, val);
}
/*******************************************************************************
* Prototype of function to map an interrupt type to the interrupt line used to
* signal it.
******************************************************************************/
uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type);
#endif /*__ASSEMBLY__*/
#endif /* __GIC_V2_H__ */
......@@ -148,6 +148,7 @@
#define SCR_FIQ_BIT (1 << 2)
#define SCR_IRQ_BIT (1 << 1)
#define SCR_NS_BIT (1 << 0)
#define SCR_VALID_BIT_MASK 0x2f8f
/* HCR definitions */
#define HCR_RW_BIT (1ull << 31)
......@@ -264,6 +265,28 @@
((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
/* Physical timer control register bit fields shifts and masks */
#define CNTP_CTL_ENABLE_SHIFT 0
#define CNTP_CTL_IMASK_SHIFT 1
#define CNTP_CTL_ISTATUS_SHIFT 2
#define CNTP_CTL_ENABLE_MASK 1
#define CNTP_CTL_IMASK_MASK 1
#define CNTP_CTL_ISTATUS_MASK 1
#define get_cntp_ctl_enable(x) ((x >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) ((x >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) (x |= 1 << CNTP_CTL_ENABLE_SHIFT)
#define set_cntp_ctl_imask(x) (x |= 1 << CNTP_CTL_IMASK_SHIFT)
#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT))
/* Miscellaneous MMU related constants */
#define NUM_2MB_IN_GB (1 << 9)
#define NUM_4K_IN_2MB (1 << 9)
......