Commit 421c572a authored by Andrew Thoelke

Merge commit 'for-v0.4/05.20^' into for-v0.4

Conflicts:
	plat/fvp/bl32_plat_setup.c
	plat/fvp/platform.mk
	services/spd/tspd/tspd_main.c

Change-Id: Ib4bad39a8a476a6b1796298f95dfb97c8b100975
parents 558c76b1 956e09a2
Showing with 707 additions and 98 deletions
@@ -190,6 +190,7 @@ extern void psci_system_reset(void);
extern int psci_cpu_on(unsigned long,
unsigned long,
unsigned long);
extern void __dead2 psci_power_down_wfi(void);
extern void psci_aff_on_finish_entry(void);
extern void psci_aff_suspend_finish_entry(void);
extern void psci_register_spd_pm_hook(const spd_pm_ops_t *);
......
@@ -40,16 +40,44 @@
#define TSP_OFF_DONE 0xf2000002
#define TSP_SUSPEND_DONE 0xf2000003
#define TSP_RESUME_DONE 0xf2000004
#define TSP_PREEMPTED 0xf2000005
/*
* Function identifiers to handle FIQs through the synchronous handling model.
* If the TSP was previously interrupted then control has to be returned to
* the TSPD after handling the interrupt else execution can remain in the TSP.
*/
#define TSP_HANDLED_S_EL1_FIQ 0xf2000006
#define TSP_EL3_FIQ 0xf2000007
/* SMC function ID that TSP uses to request service from secure monitor */
#define TSP_GET_ARGS 0xf2001000
/*
* Identifiers for various TSP services. Corresponding function IDs (whether
* fast or standard) are generated by macros defined below
*/
#define TSP_ADD 0x2000
#define TSP_SUB 0x2001
#define TSP_MUL 0x2002
#define TSP_DIV 0x2003
#define TSP_HANDLE_FIQ_AND_RETURN 0x2004
/*
* Generate function IDs for TSP services to be used in SMC calls, by
* appropriately setting bit 31 to differentiate standard and fast SMC calls
*/
#define TSP_STD_FID(fid) ((fid) | 0x72000000 | (0 << 31))
#define TSP_FAST_FID(fid) ((fid) | 0x72000000 | (1 << 31))
/* SMC function ID to request a previously preempted std smc */
#define TSP_FID_RESUME TSP_STD_FID(0x3000)
/*
* Identify a TSP service from function ID filtering the last 16 bits from the
* SMC function ID
*/
#define TSP_BARE_FID(fid) ((fid) & 0xffff)
/*
* Total number of function IDs implemented for services offered to NS clients.
@@ -86,6 +114,7 @@
#include <cassert.h>
#include <platform.h> /* For CACHE_WRITEBACK_GRANULE */
#include <spinlock.h>
#include <stdint.h>
typedef void (*tsp_generic_fptr_t)(uint64_t arg0,
@@ -98,14 +127,20 @@ typedef void (*tsp_generic_fptr_t)(uint64_t arg0,
uint64_t arg7);
typedef struct entry_info {
tsp_generic_fptr_t std_smc_entry;
tsp_generic_fptr_t fast_smc_entry;
tsp_generic_fptr_t cpu_on_entry;
tsp_generic_fptr_t cpu_off_entry;
tsp_generic_fptr_t cpu_resume_entry;
tsp_generic_fptr_t cpu_suspend_entry;
tsp_generic_fptr_t fiq_entry;
} entry_info_t;
typedef struct work_statistics {
uint32_t fiq_count; /* Number of FIQs on this cpu */
uint32_t irq_count; /* Number of IRQs on this cpu */
uint32_t sync_fiq_count; /* Number of sync. fiqs on this cpu */
uint32_t sync_fiq_ret_count; /* Number of fiq returns on this cpu */
uint32_t smc_count; /* Number of returns on this cpu */
uint32_t eret_count; /* Number of entries on this cpu */
uint32_t cpu_on_count; /* Number of cpu on requests */
@@ -120,7 +155,7 @@ typedef struct tsp_args {
/* Macros to access members of the above structure using their offsets */
#define read_sp_arg(args, offset) ((args)->_regs[offset >> 3])
#define write_sp_arg(args, offset, val) (((args)->_regs[offset >> 3]) \
= val)
/*
@@ -131,6 +166,22 @@ CASSERT(TSP_ARGS_SIZE == sizeof(tsp_args_t), assert_sp_args_size_mismatch);
extern void tsp_get_magic(uint64_t args[4]);
extern void tsp_fiq_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
uint64_t arg3,
uint64_t arg4,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
extern void tsp_std_smc_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
uint64_t arg3,
uint64_t arg4,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
extern void tsp_fast_smc_entry(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
@@ -196,6 +247,20 @@ extern tsp_args_t *tsp_cpu_off_main(uint64_t arg0,
uint64_t arg5,
uint64_t arg6,
uint64_t arg7);
/* Generic Timer functions */
extern void tsp_generic_timer_start(void);
extern void tsp_generic_timer_handler(void);
extern void tsp_generic_timer_stop(void);
extern void tsp_generic_timer_save(void);
extern void tsp_generic_timer_restore(void);
/* FIQ management functions */
extern void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3);
/* Data structure to keep track of TSP statistics */
extern spinlock_t console_lock;
extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
#endif /* __ASSEMBLY__ */
#endif /* __BL2_H__ */
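A minimal, standalone sketch of how the TSP_FAST_FID / TSP_STD_FID / TSP_BARE_FID macros introduced above compose SMC function IDs; the macro copies below simply mirror the definitions added to tsp.h, and the program itself is illustrative only:

```c
/* Mirrors of the macros added to tsp.h above, for a standalone illustration. */
#include <stdint.h>
#include <stdio.h>

#define TSP_ADD			0x2000
#define TSP_STD_FID(fid)	((fid) | 0x72000000 | (0u << 31))
#define TSP_FAST_FID(fid)	((fid) | 0x72000000 | (1u << 31))
#define TSP_BARE_FID(fid)	((fid) & 0xffff)

int main(void)
{
	uint32_t fast_add = TSP_FAST_FID(TSP_ADD);	/* 0xf2002000, same as the old TSP_FID_ADD */
	uint32_t std_add = TSP_STD_FID(TSP_ADD);	/* 0x72002000, standard (preemptible) variant */

	printf("fast: 0x%x std: 0x%x bare: 0x%x\n",
	       (unsigned int)fast_add, (unsigned int)std_add,
	       (unsigned int)TSP_BARE_FID(std_add));
	return 0;
}
```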
@@ -43,6 +43,7 @@
#define GIC_LOWEST_SEC_PRIORITY 127
#define GIC_HIGHEST_NS_PRIORITY 128
#define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */
#define GIC_SPURIOUS_INTERRUPT 1023
#define ENABLE_GRP0 (1 << 0)
#define ENABLE_GRP1 (1 << 1)
@@ -88,6 +89,7 @@
#define GICC_EOIR 0x10
#define GICC_RPR 0x14
#define GICC_HPPIR 0x18
#define GICC_AHPPIR 0x28
#define GICC_IIDR 0xFC
#define GICC_DIR 0x1000
#define GICC_PRIODROP GICC_EOIR
@@ -247,6 +249,11 @@ static inline unsigned int gicc_read_hppir(unsigned int base)
return mmio_read_32(base + GICC_HPPIR);
}
static inline unsigned int gicc_read_ahppir(unsigned int base)
{
return mmio_read_32(base + GICC_AHPPIR);
}
static inline unsigned int gicc_read_dir(unsigned int base)
{
return mmio_read_32(base + GICC_DIR);
@@ -298,6 +305,12 @@ static inline void gicc_write_dir(unsigned int base, unsigned int val)
mmio_write_32(base + GICC_DIR, val);
}
/*******************************************************************************
* Prototype of function to map an interrupt type to the interrupt line used to
* signal it.
******************************************************************************/
uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type);
#endif /*__ASSEMBLY__*/
#endif /* __GIC_V2_H__ */
@@ -148,6 +148,7 @@
#define SCR_FIQ_BIT (1 << 2)
#define SCR_IRQ_BIT (1 << 1)
#define SCR_NS_BIT (1 << 0)
#define SCR_VALID_BIT_MASK 0x2f8f
/* HCR definitions */
#define HCR_RW_BIT (1ull << 31)
@@ -264,6 +265,28 @@
((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
/* Physical timer control register bit fields shifts and masks */
#define CNTP_CTL_ENABLE_SHIFT 0
#define CNTP_CTL_IMASK_SHIFT 1
#define CNTP_CTL_ISTATUS_SHIFT 2
#define CNTP_CTL_ENABLE_MASK 1
#define CNTP_CTL_IMASK_MASK 1
#define CNTP_CTL_ISTATUS_MASK 1
#define get_cntp_ctl_enable(x) ((x >> CNTP_CTL_ENABLE_SHIFT) & \
CNTP_CTL_ENABLE_MASK)
#define get_cntp_ctl_imask(x) ((x >> CNTP_CTL_IMASK_SHIFT) & \
CNTP_CTL_IMASK_MASK)
#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \
CNTP_CTL_ISTATUS_MASK)
#define set_cntp_ctl_enable(x) (x |= 1 << CNTP_CTL_ENABLE_SHIFT)
#define set_cntp_ctl_imask(x) (x |= 1 << CNTP_CTL_IMASK_SHIFT)
#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT))
/* Miscellaneous MMU related constants */
#define NUM_2MB_IN_GB (1 << 9)
#define NUM_4K_IN_2MB (1 << 9)
......
@@ -202,6 +202,10 @@ extern unsigned long read_cptr_el3(void);
extern unsigned long read_cpacr(void);
extern unsigned long read_cpuectlr(void);
extern unsigned int read_cntfrq_el0(void);
extern unsigned int read_cntps_ctl_el1(void);
extern unsigned int read_cntps_tval_el1(void);
extern unsigned long read_cntps_cval_el1(void);
extern unsigned long read_cntpct_el0(void);
extern unsigned long read_cnthctl_el2(void);
extern unsigned long read_tpidr_el3(void);
@@ -210,6 +214,9 @@ extern void write_scr(unsigned long);
extern void write_hcr(unsigned long);
extern void write_cpacr(unsigned long);
extern void write_cntfrq_el0(unsigned int);
extern void write_cntps_ctl_el1(unsigned int);
extern void write_cntps_tval_el1(unsigned int);
extern void write_cntps_cval_el1(unsigned long);
extern void write_cnthctl_el2(unsigned long);
extern void write_vbar_el1(unsigned long);
......
@@ -142,6 +142,15 @@
.globl read_cntfrq_el0
.globl write_cntfrq_el0
.globl read_cntps_ctl_el1
.globl write_cntps_ctl_el1
.globl read_cntps_cval_el1
.globl write_cntps_cval_el1
.globl read_cntps_tval_el1
.globl write_cntps_tval_el1
.globl read_scr
.globl write_scr
@@ -151,6 +160,7 @@
.globl read_midr
.globl read_mpidr
.globl read_cntpct_el0
.globl read_current_el
.globl read_id_pfr1_el1
.globl read_id_aa64pfr0_el1
@@ -672,6 +682,33 @@ func write_cntfrq_el0
msr cntfrq_el0, x0
ret
func read_cntps_ctl_el1
mrs x0, cntps_ctl_el1
ret
func write_cntps_ctl_el1
msr cntps_ctl_el1, x0
ret
func read_cntps_cval_el1
mrs x0, cntps_cval_el1
ret
func write_cntps_cval_el1
msr cntps_cval_el1, x0
ret
func read_cntps_tval_el1
mrs x0, cntps_tval_el1
ret
func write_cntps_tval_el1
msr cntps_tval_el1, x0
ret
func read_cntpct_el0
mrs x0, cntpct_el0
ret
func read_cpuectlr
mrs x0, CPUECTLR_EL1
......
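A rough sketch (not code from this patch) of how the cntps_* accessors and the CNTP_CTL bit helpers introduced above could be combined to drive the secure physical timer; the actual tsp_generic_timer_* implementations declared in tsp.h may differ in detail, and the interval below is an arbitrary illustration:

```c
#include <stdint.h>
#include <arch.h>		/* set_cntp_ctl_enable(), clr_cntp_ctl_imask(), ... */
#include <arch_helpers.h>	/* read_cntfrq_el0(), write_cntps_*_el1() */

/* Illustrative interval: half a second worth of counter ticks. */
#define EXAMPLE_TIMER_TICKS	(read_cntfrq_el0() / 2)

static void example_secure_timer_start(void)
{
	uint32_t ctl = 0;

	/* Program the downcount value first, then enable the timer with
	 * its interrupt unmasked (IMASK == 0). */
	write_cntps_tval_el1(EXAMPLE_TIMER_TICKS);
	set_cntp_ctl_enable(ctl);
	clr_cntp_ctl_imask(ctl);
	write_cntps_ctl_el1(ctl);
}

static void example_secure_timer_stop(void)
{
	/* Clearing the control register disables the timer and its interrupt. */
	write_cntps_ctl_el1(0);
}
```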
@@ -73,6 +73,9 @@ void bl32_early_platform_setup(void)
* messages from TSP
*/
console_init(PL011_UART1_BASE);
/* Initialize the platform config for future decision making */
platform_config_setup();
}
/*******************************************************************************
......
@@ -29,18 +29,15 @@
*/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <gic_v2.h>
#include <gic_v3.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <stdint.h>
/*******************************************************************************
* TODO: Revisit if priorities are being set such that no non-secure interrupt
* can have a higher priority than a secure one as recommended in the GICv2 spec
******************************************************************************/
/*******************************************************************************
* This function does some minimal GICv3 configuration. The Firmware itself does
* not fully support GICv3 at this time and relies on GICv2 emulation as
@@ -284,3 +281,126 @@ void gic_setup(void)
gic_cpuif_setup(gicc_base);
gic_distif_setup(gicd_base);
}
/*******************************************************************************
* An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
* The interrupt controller knows which pin/line it uses to signal a type of
* interrupt. The platform knows which interrupt controller type is being used
* in a particular security state e.g. with an ARM GIC, normal world could use
* the GICv2 features while the secure world could use GICv3 features and vice
* versa.
* This function is exported by the platform to let the interrupt management
* framework determine for a type of interrupt and security state, which line
* should be used in the SCR_EL3 to control its routing to EL3. The interrupt
* line is represented as the bit position of the IRQ or FIQ bit in the SCR_EL3.
******************************************************************************/
uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state)
{
uint32_t gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
assert(type == INTR_TYPE_S_EL1 ||
type == INTR_TYPE_EL3 ||
type == INTR_TYPE_NS);
assert(security_state == NON_SECURE || security_state == SECURE);
/*
* We ignore the security state parameter under the assumption that
* both normal and secure worlds are using ARM GICv2. This parameter
* will be used when the secure world starts using GICv3.
*/
#if FVP_GIC_ARCH == 2
return gicv2_interrupt_type_to_line(gicc_base, type);
#else
#error "Invalid GIC architecture version specified for FVP port"
#endif
}
#if FVP_GIC_ARCH == 2
/*******************************************************************************
* This function returns the type of the highest priority pending interrupt at
* the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no
* interrupt pending.
******************************************************************************/
uint32_t ic_get_pending_interrupt_type()
{
uint32_t id, gicc_base;
gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
id = gicc_read_hppir(gicc_base);
/* Assume that all secure interrupts are S-EL1 interrupts */
if (id < 1022)
return INTR_TYPE_S_EL1;
if (id == GIC_SPURIOUS_INTERRUPT)
return INTR_TYPE_INVAL;
return INTR_TYPE_NS;
}
/*******************************************************************************
* This function returns the id of the highest priority pending interrupt at
* the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no
* interrupt pending.
******************************************************************************/
uint32_t ic_get_pending_interrupt_id()
{
uint32_t id, gicc_base;
gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
id = gicc_read_hppir(gicc_base);
if (id < 1022)
return id;
if (id == 1023)
return INTR_ID_UNAVAILABLE;
/*
* Find out which non-secure interrupt it is under the assumption that
* the GICC_CTLR.AckCtl bit is 0.
*/
return gicc_read_ahppir(gicc_base);
}
/*******************************************************************************
* This function reads the GIC cpu interface Interrupt Acknowledge register
* to start handling the pending interrupt. It returns the contents of the IAR.
******************************************************************************/
uint32_t ic_acknowledge_interrupt()
{
return gicc_read_IAR(platform_get_cfgvar(CONFIG_GICC_ADDR));
}
/*******************************************************************************
* This function writes the GIC cpu interface End Of Interrupt register with
* the passed value to finish handling the active interrupt
******************************************************************************/
void ic_end_of_interrupt(uint32_t id)
{
gicc_write_EOIR(platform_get_cfgvar(CONFIG_GICC_ADDR), id);
return;
}
/*******************************************************************************
* This function returns the type of the interrupt id depending upon the group
* this interrupt has been configured under by the interrupt controller i.e.
* group0 or group1.
******************************************************************************/
uint32_t ic_get_interrupt_type(uint32_t id)
{
uint32_t group;
group = gicd_get_igroupr(platform_get_cfgvar(CONFIG_GICD_ADDR), id);
/* Assume that all secure interrupts are S-EL1 interrupts */
if (group == GRP0)
return INTR_TYPE_S_EL1;
else
return INTR_TYPE_NS;
}
#else
#error "Invalid GIC architecture version specified for FVP port"
#endif
@@ -445,13 +445,20 @@ extern void plat_get_entry_point_info(unsigned long target_security,
#endif
extern void plat_cci_setup(void);
/* Declarations for plat_gic.c */
extern uint32_t ic_get_pending_interrupt_id(void);
extern uint32_t ic_get_pending_interrupt_type(void);
extern uint32_t ic_acknowledge_interrupt(void);
extern uint32_t ic_get_interrupt_type(uint32_t id);
extern void ic_end_of_interrupt(uint32_t id);
extern void gic_cpuif_deactivate(unsigned int);
extern void gic_cpuif_setup(unsigned int);
extern void gic_pcpu_distif_setup(unsigned int);
extern void gic_setup(void);
extern uint32_t plat_interrupt_type_to_line(uint32_t type,
uint32_t security_state);
/* Declarations for plat_topology.c */
extern int plat_setup_topology(void);
extern int plat_get_max_afflvl(void);
extern unsigned int plat_get_aff_count(unsigned int, unsigned long);
......
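An illustrative sketch (not part of this patch) of how EL3 code could combine the ic_* hooks and plat_interrupt_type_to_line() declared above to inspect, acknowledge and complete a pending interrupt; the GICv2-backed implementations are the ones shown in the gic code earlier in this diff:

```c
#include <stdint.h>
#include <interrupt_mgmt.h>	/* INTR_TYPE_* definitions */
#include <platform.h>		/* ic_*() and plat_interrupt_type_to_line() */

static void example_drain_pending_interrupt(uint32_t security_state)
{
	uint32_t type, iar, id, line;

	type = ic_get_pending_interrupt_type();
	if (type == INTR_TYPE_INVAL)
		return;		/* nothing is pending */

	/* Which SCR_EL3 bit position (IRQ or FIQ) routes this type to EL3? */
	line = plat_interrupt_type_to_line(type, security_state);
	(void)line;

	/* Acknowledge at the GIC cpu interface and signal completion. */
	iar = ic_acknowledge_interrupt();
	id = iar & 0x3ff;	/* GICv2 interrupt ID field of the IAR */
	ic_end_of_interrupt(id);
}
```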
@@ -86,3 +86,9 @@ ifeq (${RESET_TO_BL31}, 1)
BL31_SOURCES += drivers/arm/tzc400/tzc400.c \
plat/fvp/plat_security.c
endif
# Flag used by the FVP port to determine the version of ARM GIC architecture
# to use for interrupt management in EL3.
FVP_GIC_ARCH := 2
$(eval $(call add_define,FVP_GIC_ARCH))
@@ -65,10 +65,14 @@ int32_t tspd_init_secure_context(uint64_t entrypoint,
*/
memset(tsp_ctx, 0, sizeof(*tsp_ctx));
/*
* Set the right security state, register width and enable access to
* the secure physical timer for the SP.
*/
scr = read_scr();
scr &= ~SCR_NS_BIT;
scr &= ~SCR_RW_BIT;
scr |= SCR_ST_BIT;
if (rw == TSP_AARCH64)
scr |= SCR_RW_BIT;
@@ -85,7 +89,14 @@ int32_t tspd_init_secure_context(uint64_t entrypoint,
write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr);
/* Set this context as ready to be initialised i.e OFF */
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
/*
* This context has not been used yet. It will become valid
* when the TSP is interrupted and wants the TSPD to preserve
* the context.
*/
clr_std_smc_active_flag(tsp_ctx->state);
/* Associate this context with the cpu specified */
tsp_ctx->mpidr = mpidr;
......
@@ -43,6 +43,9 @@
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <tsp.h>
@@ -68,6 +71,75 @@ DEFINE_SVC_UUID(tsp_uuid,
int32_t tspd_init(void);
/*******************************************************************************
* This function is the handler registered for S-EL1 interrupts by the TSPD. It
* validates the interrupt and upon success arranges entry into the TSP at
* 'tsp_fiq_entry()' for handling the interrupt.
******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
uint32_t flags,
void *handle,
void *cookie)
{
uint32_t linear_id;
uint64_t mpidr;
tsp_context_t *tsp_ctx;
/* Check the security state when the exception was generated */
assert(get_interrupt_src_ss(flags) == NON_SECURE);
#if IMF_READ_INTERRUPT_ID
/* Check the security status of the interrupt */
assert(ic_get_interrupt_group(id) == SECURE);
#endif
/* Sanity check the pointer to this cpu's context */
mpidr = read_mpidr();
assert(handle == cm_get_context(mpidr, NON_SECURE));
/* Save the non-secure context before entering the TSP */
cm_el1_sysregs_context_save(NON_SECURE);
/* Get a reference to this cpu's TSP context */
linear_id = platform_get_core_pos(mpidr);
tsp_ctx = &tspd_sp_context[linear_id];
assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
/*
* Determine if the TSP was previously preempted. Its last known
* context has to be preserved in this case.
* The TSP should return control to the TSPD after handling this
* FIQ. Preserve essential EL3 context to allow entry into the
* TSP at the FIQ entry point using the 'cpu_context' structure.
* There is no need to save the secure system register context
* since the TSP is supposed to preserve it during S-EL1 interrupt
* handling.
*/
if (get_std_smc_active_flag(tsp_ctx->state)) {
tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
CTX_SPSR_EL3);
tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
CTX_ELR_EL3);
}
SMC_SET_EL3(&tsp_ctx->cpu_ctx,
CTX_SPSR_EL3,
SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTION));
SMC_SET_EL3(&tsp_ctx->cpu_ctx,
CTX_ELR_EL3,
(uint64_t) tsp_entry_info->fiq_entry);
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
/*
* Tell the TSP that it has to handle an FIQ synchronously. Also the
* instruction in normal world where the interrupt was generated is
* passed for debugging purposes. It is safe to retrieve this address
* from ELR_EL3 as the secure context will not take effect until
* el3_exit().
*/
SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3());
}
/*******************************************************************************
* Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
@@ -131,7 +203,7 @@ int32_t tspd_setup(void)
int32_t tspd_init(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), flags;
uint64_t rc;
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
@@ -142,7 +214,7 @@ int32_t tspd_init(void)
rc = tspd_synchronous_sp_entry(tsp_ctx);
assert(rc != 0);
if (rc) {
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
/*
* TSP has been successfully initialized. Register power
@@ -151,6 +223,18 @@
psci_register_spd_pm_hook(&tspd_pm);
}
/*
* Register an interrupt handler for S-EL1 interrupts when generated
* during code executing in the non-secure state.
*/
flags = 0;
set_interrupt_rm_flag(flags, NON_SECURE);
rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
tspd_sel1_interrupt_handler,
flags);
if (rc)
panic();
return rc;
}
@@ -173,7 +257,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
uint64_t flags)
{
cpu_context_t *ns_cpu_context;
unsigned long mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), ns;
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
@@ -183,6 +266,98 @@
switch (smc_fid) {
/*
* This function ID is used by TSP to indicate that it was
* preempted by a normal world IRQ.
*
*/
case TSP_PREEMPTED:
if (ns)
SMC_RET1(handle, SMC_UNK);
assert(handle == cm_get_context(mpidr, SECURE));
cm_el1_sysregs_context_save(SECURE);
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
/*
* Restore non-secure state. There is no need to save the
* secure system register context since the TSP was supposed
* to preserve it during S-EL1 interrupt handling.
*/
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
/*
* This function ID is used only by the TSP to indicate that it has
* finished handling an S-EL1 FIQ interrupt. Execution should resume
* in the normal world.
*/
case TSP_HANDLED_S_EL1_FIQ:
if (ns)
SMC_RET1(handle, SMC_UNK);
assert(handle == cm_get_context(mpidr, SECURE));
/*
* Restore the relevant EL3 state which was saved to service
* this SMC.
*/
if (get_std_smc_active_flag(tsp_ctx->state)) {
SMC_SET_EL3(&tsp_ctx->cpu_ctx,
CTX_SPSR_EL3,
tsp_ctx->saved_spsr_el3);
SMC_SET_EL3(&tsp_ctx->cpu_ctx,
CTX_ELR_EL3,
tsp_ctx->saved_elr_el3);
}
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
/*
* Restore non-secure state. There is no need to save the
* secure system register context since the TSP was supposed
* to preserve it during S-EL1 interrupt handling.
*/
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
SMC_RET0((uint64_t) ns_cpu_context);
/*
* This function ID is used only by the TSP to indicate that it was
* interrupted due to an EL3 FIQ interrupt. Execution should resume
* in the normal world.
*/
case TSP_EL3_FIQ:
if (ns)
SMC_RET1(handle, SMC_UNK);
assert(handle == cm_get_context(mpidr, SECURE));
/* Assert that standard SMC execution has been preempted */
assert(get_std_smc_active_flag(tsp_ctx->state));
/* Save the secure system register state */
cm_el1_sysregs_context_save(SECURE);
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
SMC_RET1(ns_cpu_context, TSP_EL3_FIQ);
/*
* This function ID is used only by the SP to indicate it has
* finished initialising itself after a cold boot
@@ -206,9 +381,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
/*
* These function IDs are used only by the SP to indicate it has
* finished:
@@ -241,18 +413,20 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
/*
* Request from non-secure client to perform an
* arithmetic operation or response from secure
* payload to an earlier request.
*/
case TSP_FAST_FID(TSP_ADD):
case TSP_FAST_FID(TSP_SUB):
case TSP_FAST_FID(TSP_MUL):
case TSP_FAST_FID(TSP_DIV):
case TSP_STD_FID(TSP_ADD):
case TSP_STD_FID(TSP_SUB):
case TSP_STD_FID(TSP_MUL):
case TSP_STD_FID(TSP_DIV):
if (ns) {
/*
* This is a fresh request from the non-secure client.
@@ -261,11 +435,15 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
* state and send the request to the secure payload.
*/
assert(handle == cm_get_context(mpidr, NON_SECURE));
/* Check if we are already preempted */
if (get_std_smc_active_flag(tsp_ctx->state))
SMC_RET1(handle, SMC_UNK);
cm_el1_sysregs_context_save(NON_SECURE);
/* Save x1 and x2 for use by TSP_GET_ARGS call below */
store_tsp_args(tsp_ctx, x1, x2);
/*
* We are done stashing the non-secure context. Ask the
@@ -280,17 +458,27 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
* from this function.
*/
assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
/* Set appropriate entry for SMC.
* We expect the TSP to manage the PSTATE.I and PSTATE.F
* flags as appropriate.
*/
if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
cm_set_elr_el3(SECURE, (uint64_t)
tsp_entry_info->fast_smc_entry);
} else {
set_std_smc_active_flag(tsp_ctx->state);
cm_set_elr_el3(SECURE, (uint64_t)
tsp_entry_info->std_smc_entry);
}
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
} else {
/*
* This is the result from the secure client of an
* earlier request. The results are in x1-x3. Copy it
* into the non-secure context, save the secure state
* and return to the non-secure state.
*/
@@ -300,17 +488,52 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD)
clr_std_smc_active_flag(tsp_ctx->state);
SMC_RET3(ns_cpu_context, x1, x2, x3);
}
break;
/*
* Request from non secure world to resume the preempted
* Standard SMC call.
*/
case TSP_FID_RESUME:
/* RESUME should be invoked only by normal world */
if (!ns) {
assert(0);
break;
}
/*
* This is a resume request from the non-secure client.
* save the non-secure state and send the request to
* the secure payload.
*/
assert(handle == cm_get_context(mpidr, NON_SECURE));
/* Check if we are already preempted before resume */
if (!get_std_smc_active_flag(tsp_ctx->state))
SMC_RET1(handle, SMC_UNK);
cm_el1_sysregs_context_save(NON_SECURE);
/*
* We are done stashing the non-secure context. Ask the
* secure payload to do the work now.
*/
/* We just need to return to the preempted point in
* TSP and the execution will resume as normal.
*/
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
/*
* This is a request from the secure payload for more arguments
* for an ongoing arithmetic operation requested by the
@@ -324,10 +547,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
get_tsp_args(tsp_ctx, x1, x2);
SMC_RET2(handle, x1, x2);
case TOS_CALL_COUNT:
/*
@@ -351,9 +573,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, SMC_UNK);
}
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
tspd_fast,
OEN_TOS_START,
OEN_TOS_END,
@@ -361,3 +583,14 @@ DECLARE_RT_SVC(
tspd_setup,
tspd_smc_handler
);
/* Define a SPD runtime service descriptor for standard SMC calls */
DECLARE_RT_SVC(
tspd_std,
OEN_TOS_START,
OEN_TOS_END,
SMC_TYPE_STD,
NULL,
tspd_smc_handler
);
@@ -56,10 +56,10 @@ static int32_t tspd_cpu_off_handler(uint64_t cookie)
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_entry_info);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
/* Program the entry point and enter the TSP */
cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry);
rc = tspd_synchronous_sp_entry(tsp_ctx);
/*
@@ -73,7 +73,7 @@ static int32_t tspd_cpu_off_handler(uint64_t cookie)
* Reset TSP's context for a fresh start when this cpu is turned on
* subsequently.
*/
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
return 0;
}
@@ -90,13 +90,13 @@ static void tspd_cpu_suspend_handler(uint64_t power_state)
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_entry_info);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
/* Program the entry point, power_state parameter and enter the TSP */
write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
CTX_GPREG_X0,
power_state);
cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry);
rc = tspd_synchronous_sp_entry(tsp_ctx);
/*
@@ -107,7 +107,7 @@ static void tspd_cpu_suspend_handler(uint64_t power_state)
panic();
/* Update its context to reflect the state the TSP is in */
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_SUSPEND);
}
/*******************************************************************************
@@ -124,7 +124,7 @@ static void tspd_cpu_on_finish_handler(uint64_t cookie)
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_entry_info);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_OFF);
/* Initialise this cpu's secure context */
tspd_init_secure_context((uint64_t) tsp_entry_info->cpu_on_entry,
@@ -143,7 +143,7 @@ static void tspd_cpu_on_finish_handler(uint64_t cookie)
panic();
/* Update its context to reflect the state the SP is in */
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
}
/*******************************************************************************
@@ -159,13 +159,13 @@ static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level)
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_entry_info);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND);
/* Program the entry point, suspend_level and enter the SP */
write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
CTX_GPREG_X0,
suspend_level);
cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_resume_entry);
rc = tspd_synchronous_sp_entry(tsp_ctx);
/*
@@ -176,7 +176,7 @@ static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level)
panic();
/* Update its context to reflect the state the SP is in */
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
}
/*******************************************************************************
......
@@ -33,15 +33,47 @@
#include <arch.h>
#include <context.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <psci.h>
/*******************************************************************************
* Secure Payload PM state information e.g. SP is suspended, uninitialised etc
* and macros to access the state information in the per-cpu 'state' flags
******************************************************************************/
#define TSP_PSTATE_OFF 0
#define TSP_PSTATE_ON 1
#define TSP_PSTATE_SUSPEND 2
#define TSP_PSTATE_SHIFT 0
#define TSP_PSTATE_MASK 0x3
#define get_tsp_pstate(state) ((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK)
#define clr_tsp_pstate(state) (state &= ~(TSP_PSTATE_MASK \
<< TSP_PSTATE_SHIFT))
#define set_tsp_pstate(st, pst) do { \
clr_tsp_pstate(st); \
st |= (pst & TSP_PSTATE_MASK) << \
TSP_PSTATE_SHIFT; \
} while (0);
/*
* This flag is used by the TSPD to determine if the TSP is servicing a standard
* SMC request prior to programming the next entry into the TSP e.g. if TSP
* execution is preempted by a non-secure interrupt and handed control to the
* normal world. If another request which is distinct from what the TSP was
* previously doing arrives, then this flag will help the TSPD to either
* reject the new request or service it while ensuring that the previous context
* is not corrupted.
*/
#define STD_SMC_ACTIVE_FLAG_SHIFT 2
#define STD_SMC_ACTIVE_FLAG_MASK 1
#define get_std_smc_active_flag(state) ((state >> STD_SMC_ACTIVE_FLAG_SHIFT) \
& STD_SMC_ACTIVE_FLAG_MASK)
#define set_std_smc_active_flag(state) (state |= \
1 << STD_SMC_ACTIVE_FLAG_SHIFT)
#define clr_std_smc_active_flag(state) (state &= \
~(STD_SMC_ACTIVE_FLAG_MASK \
<< STD_SMC_ACTIVE_FLAG_SHIFT))
/*******************************************************************************
* Secure Payload execution state information i.e. aarch32 or aarch64
@@ -93,6 +125,12 @@
#include <cassert.h>
#include <stdint.h>
/*
* The number of arguments to save during a SMC call for TSP.
* Currently only x1 and x2 are used by TSP.
*/
#define TSP_NUM_ARGS 0x2
/* AArch64 callee saved general purpose register context structure. */
DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);
@@ -106,19 +144,39 @@ CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t), \
/*******************************************************************************
* Structure which helps the SPD to maintain the per-cpu state of the SP.
* 'saved_spsr_el3' - temporary copy to allow FIQ handling when the TSP has been
* preempted.
* 'saved_elr_el3' - temporary copy to allow FIQ handling when the TSP has been
* preempted.
* 'state' - collection of flags to track SP state e.g. on/off
* 'mpidr' - mpidr to associate a context with a cpu
* 'c_rt_ctx' - stack address to restore C runtime context from after
* returning from a synchronous entry into the SP.
* 'cpu_ctx' - space to maintain SP architectural state
* 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
* which will be queried using the TSP_GET_ARGS SMC by the TSP.
******************************************************************************/
typedef struct tsp_context {
uint64_t saved_elr_el3;
uint32_t saved_spsr_el3;
uint32_t state;
uint64_t mpidr;
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
uint64_t saved_tsp_args[TSP_NUM_ARGS];
} tsp_context_t;
/* Helper macros to store and retrieve tsp args from tsp_context */
#define store_tsp_args(tsp_ctx, x1, x2) do {\
tsp_ctx->saved_tsp_args[0] = x1;\
tsp_ctx->saved_tsp_args[1] = x2;\
} while (0)
#define get_tsp_args(tsp_ctx, x1, x2) do {\
x1 = tsp_ctx->saved_tsp_args[0];\
x2 = tsp_ctx->saved_tsp_args[1];\
} while (0)
/* TSPD power management handlers */
extern const spd_pm_ops_t tspd_pm;
......
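A small standalone illustration of the intended life-cycle of the per-cpu 'state' word; the macros below are copies of the pstate and STD-SMC-active helpers defined above (slightly tidied), and the flow mirrors how tspd_main.c and tspd_pm.c use them:

```c
#include <assert.h>
#include <stdint.h>

/* Local copies of the helpers defined in tspd_private.h above. */
#define TSP_PSTATE_OFF			0
#define TSP_PSTATE_ON			1
#define TSP_PSTATE_SHIFT		0
#define TSP_PSTATE_MASK			0x3
#define get_tsp_pstate(state)		((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK)
#define clr_tsp_pstate(state)		(state &= ~(TSP_PSTATE_MASK << TSP_PSTATE_SHIFT))
#define set_tsp_pstate(st, pst)		do {					\
						clr_tsp_pstate(st);		\
						st |= (pst & TSP_PSTATE_MASK)	\
							<< TSP_PSTATE_SHIFT;	\
					} while (0)

#define STD_SMC_ACTIVE_FLAG_SHIFT	2
#define STD_SMC_ACTIVE_FLAG_MASK	1
#define get_std_smc_active_flag(state)	((state >> STD_SMC_ACTIVE_FLAG_SHIFT) \
						& STD_SMC_ACTIVE_FLAG_MASK)
#define set_std_smc_active_flag(state)	(state |= 1 << STD_SMC_ACTIVE_FLAG_SHIFT)
#define clr_std_smc_active_flag(state)	(state &= ~(STD_SMC_ACTIVE_FLAG_MASK \
						<< STD_SMC_ACTIVE_FLAG_SHIFT))

int main(void)
{
	uint32_t state = 0;	/* as after the memset() in tspd_init_secure_context() */

	set_tsp_pstate(state, TSP_PSTATE_ON);	/* TSP finished initialising on this cpu */
	set_std_smc_active_flag(state);		/* a standard SMC is now in progress */

	assert(get_tsp_pstate(state) == TSP_PSTATE_ON);
	assert(get_std_smc_active_flag(state));

	clr_std_smc_active_flag(state);		/* standard SMC completed (or aborted) */
	assert(get_tsp_pstate(state) == TSP_PSTATE_ON);	/* power state is unaffected */
	return 0;
}
```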
@@ -30,13 +30,13 @@
#include <arch.h>
#include <asm_macros.S>
#include <psci.h>
.globl psci_aff_on_finish_entry
.globl psci_aff_suspend_finish_entry
.globl __psci_cpu_off
.globl __psci_cpu_suspend
.globl psci_power_down_wfi
/* -----------------------------------------------------
* This cpu has been physically powered up. Depending
@@ -120,9 +120,6 @@ func __psci_cpu_off
mrs x0, mpidr_el1
bl platform_set_coherent_stack
bl psci_cpu_off
mov sp, x19
ldp x19, x20, [sp,#0]
add sp, sp, #0x10
@@ -144,9 +141,6 @@ func __psci_cpu_suspend
mov x1, x21
mov x2, x22
bl psci_cpu_suspend
mov sp, x19
ldp x21, x22, [sp,#0x10]
ldp x19, x20, [sp,#0]
@@ -154,7 +148,16 @@
func_epilogue
ret
/* --------------------------------------------
* This function is called to indicate to the
* power controller that it is safe to power
* down this cpu. It should not exit the wfi
* and will be released from reset upon power
* up. 'wfi_spill' is used to catch erroneous
* exits from wfi.
* --------------------------------------------
*/
func psci_power_down_wfi
dsb sy // ensure write buffer empty
wfi
wfi_spill:
......
@@ -90,13 +90,27 @@ int psci_cpu_suspend(unsigned int power_state,
if (target_afflvl > MPIDR_MAX_AFFLVL)
return PSCI_E_INVALID_PARAMS;
/* Determine the 'state type' in the 'power_state' parameter */
pstate_type = psci_get_pstate_type(power_state);
/*
* Ensure that we have a platform specific handler for entering
* a standby state.
*/
if (pstate_type == PSTATE_TYPE_STANDBY) {
if (!psci_plat_pm_ops->affinst_standby)
return PSCI_E_INVALID_PARAMS;
rc = psci_plat_pm_ops->affinst_standby(power_state);
assert(rc == PSCI_E_INVALID_PARAMS || rc == PSCI_E_SUCCESS);
return rc;
}
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this cpu else return
* an error.
*/
mpidr = read_mpidr();
rc = psci_afflvl_suspend(mpidr,
entrypoint,
@@ -104,9 +118,9 @@ int psci_cpu_suspend(unsigned int power_state,
power_state,
MPIDR_AFFLVL0,
target_afflvl);
if (rc == PSCI_E_SUCCESS)
psci_power_down_wfi();
assert(rc == PSCI_E_INVALID_PARAMS);
return rc;
}
@@ -126,11 +140,19 @@ int psci_cpu_off(void)
*/
rc = psci_afflvl_off(mpidr, MPIDR_AFFLVL0, target_afflvl);
/*
* Check if all actions needed to safely power down this cpu have
* successfully completed. Enter a wfi loop which will allow the
* power controller to physically power down this cpu.
*/
if (rc == PSCI_E_SUCCESS)
psci_power_down_wfi();
/*
* The only error cpu_off can return is E_DENIED. So check if that's
* indeed the case.
*/
assert (rc == PSCI_E_DENIED);
return rc;
}
......
@@ -53,7 +53,7 @@ uuid_t uuid_null = {0};
* const char* format_type_str[] = { "RAW", "ELF", "PIC" };
*/
/* The images used depend on the platform. */
static entry_lookup_list_t toc_entry_lookup_list[] = {
{ "Trusted Boot Firmware BL2", UUID_TRUSTED_BOOT_FIRMWARE_BL2,
"bl2", NULL, FLAG_FILENAME },
......