Unverified commit c195f1a7 authored by davidcunado-arm, committed by GitHub

Merge pull request #1152 from jeenu-arm/ehf-and-sdei

EHF and SDEI
parents bf2de7e4 cafad7be
Showing 1174 additions and 11 deletions
@@ -155,6 +155,7 @@ DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
void flush_dcache_range(uintptr_t addr, size_t size);
void clean_dcache_range(uintptr_t addr, size_t size);
/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#ifndef __CPU_DATA_H__
#define __CPU_DATA_H__
#include <ehf.h>
#include <platform_def.h> /* CACHE_WRITEBACK_GRANULE required */
#ifdef AARCH32
@@ -96,6 +97,9 @@ typedef struct cpu_data {
#if PLAT_PCPU_DATA_SIZE
uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
#if CRASH_REPORTING
@@ -202,7 +202,7 @@
GIC_INTR_CFG_EDGE)
#define ARM_G0_IRQ_PROPS(grp) \
-	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+	INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, PLAT_SDEI_NORMAL_PRI, grp, \
GIC_INTR_CFG_EDGE), \
INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_EDGE)
@@ -454,5 +454,24 @@
*/
#define PLAT_PERCPU_BAKERY_LOCK_SIZE (1 * CACHE_WRITEBACK_GRANULE)
/* Priority levels for ARM platforms */
#define PLAT_SDEI_CRITICAL_PRI 0x60
#define PLAT_SDEI_NORMAL_PRI 0x70
/* ARM platforms use the upper 3 bits of the secure interrupt priority */
#define ARM_PRI_BITS 3
/* SGI used for SDEI signalling */
#define ARM_SDEI_SGI ARM_IRQ_SEC_SGI_0
/* ARM SDEI dynamic private event numbers */
#define ARM_SDEI_DP_EVENT_0 1000
#define ARM_SDEI_DP_EVENT_1 1001
#define ARM_SDEI_DP_EVENT_2 1002
/* ARM SDEI dynamic shared event numbers */
#define ARM_SDEI_DS_EVENT_0 2000
#define ARM_SDEI_DS_EVENT_1 2001
#define ARM_SDEI_DS_EVENT_2 2002
#endif /* __ARM_DEF_H__ */
@@ -119,6 +119,7 @@ void arm_configure_sys_timer(void);
/* PM utility functions */
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state);
int arm_validate_psci_entrypoint(uintptr_t entrypoint);
int arm_validate_ns_entrypoint(uintptr_t entrypoint);
void arm_system_pwr_domain_save(void);
void arm_system_pwr_domain_resume(void);
@@ -90,6 +90,7 @@ void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
void plat_ic_set_interrupt_pending(unsigned int id);
void plat_ic_clear_interrupt_pending(unsigned int id);
unsigned int plat_ic_set_priority_mask(unsigned int mask);
unsigned int plat_ic_get_interrupt_id(unsigned int raw);
/*******************************************************************************
* Optional common functions (may be overridden)
@@ -113,6 +114,16 @@ void bl1_plat_arch_setup(void);
void bl1_platform_setup(void);
struct meminfo *bl1_plat_sec_mem_layout(void);
/*******************************************************************************
* Optional EL3 component functions in BL31
******************************************************************************/
/* SDEI platform functions */
#if SDEI_SUPPORT
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode);
void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
#endif
/*
* The following function is mandatory when the
* firmware update feature is used.
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SDEI_H__
#define __SDEI_H__
#include <spinlock.h>
#include <utils_def.h>
/* Range 0xC4000020 - 0xC400003F reserved for SDEI 64-bit SMC calls */
#define SDEI_VERSION 0xC4000020
#define SDEI_EVENT_REGISTER 0xC4000021
#define SDEI_EVENT_ENABLE 0xC4000022
#define SDEI_EVENT_DISABLE 0xC4000023
#define SDEI_EVENT_CONTEXT 0xC4000024
#define SDEI_EVENT_COMPLETE 0xC4000025
#define SDEI_EVENT_COMPLETE_AND_RESUME 0xC4000026
#define SDEI_EVENT_UNREGISTER 0xC4000027
#define SDEI_EVENT_STATUS 0xC4000028
#define SDEI_EVENT_GET_INFO 0xC4000029
#define SDEI_EVENT_ROUTING_SET 0xC400002A
#define SDEI_PE_MASK 0xC400002B
#define SDEI_PE_UNMASK 0xC400002C
#define SDEI_INTERRUPT_BIND 0xC400002D
#define SDEI_INTERRUPT_RELEASE 0xC400002E
#define SDEI_EVENT_SIGNAL 0xC400002F
#define SDEI_FEATURES 0xC4000030
#define SDEI_PRIVATE_RESET 0xC4000031
#define SDEI_SHARED_RESET 0xC4000032
/* SDEI_EVENT_REGISTER flags */
#define SDEI_REGF_RM_ANY 0
#define SDEI_REGF_RM_PE 1
/* SDEI_EVENT_COMPLETE status flags */
#define SDEI_EV_HANDLED 0
#define SDEI_EV_FAILED 1
/* SDEI event status values in bit position */
#define SDEI_STATF_REGISTERED 0
#define SDEI_STATF_ENABLED 1
#define SDEI_STATF_RUNNING 2
/* Internal: SDEI flag bit positions */
#define _SDEI_MAPF_DYNAMIC_SHIFT 1
#define _SDEI_MAPF_BOUND_SHIFT 2
#define _SDEI_MAPF_SIGNALABLE_SHIFT 3
#define _SDEI_MAPF_PRIVATE_SHIFT 4
#define _SDEI_MAPF_CRITICAL_SHIFT 5
/* SDEI event 0 */
#define SDEI_EVENT_0 0
/* Placeholder interrupt for dynamic mapping */
#define SDEI_DYN_IRQ 0
/* SDEI flags */
/*
* These flags determine whether or not an event can be associated with an
* interrupt. Static events are permanently associated with an interrupt, and
* can't be changed at runtime. Association of dynamic events with interrupts
* can be changed at run time using the SDEI_INTERRUPT_BIND and
* SDEI_INTERRUPT_RELEASE calls.
*
* SDEI_MAPF_DYNAMIC only indicates run-time configurability, whereas
* SDEI_MAPF_BOUND indicates interrupt association. For example:
*
* - A dynamic event that has been bound via SDEI_INTERRUPT_BIND will have
*   both SDEI_MAPF_DYNAMIC and SDEI_MAPF_BOUND set.
*
* - Statically-bound events will always have SDEI_MAPF_BOUND set, and neither
* SDEI_INTERRUPT_BIND nor SDEI_INTERRUPT_RELEASE can be called on them.
*
* See also the is_map_bound() macro.
*/
#define SDEI_MAPF_DYNAMIC BIT(_SDEI_MAPF_DYNAMIC_SHIFT)
#define SDEI_MAPF_BOUND BIT(_SDEI_MAPF_BOUND_SHIFT)
#define SDEI_MAPF_SIGNALABLE BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)
#define SDEI_MAPF_PRIVATE BIT(_SDEI_MAPF_PRIVATE_SHIFT)
#define SDEI_MAPF_CRITICAL BIT(_SDEI_MAPF_CRITICAL_SHIFT)
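For reference, the is_map_bound() and is_map_dynamic() predicates referenced in the comment above reduce to simple tests of these flags. A minimal sketch only; the actual definitions live in the implementation's private header (sdei_private.h), not in this public header:

/* Sketches of the flag-test predicates; actual definitions elsewhere */
#define is_map_dynamic(_map)	(((_map)->map_flags & SDEI_MAPF_DYNAMIC) != 0)
#define is_map_bound(_map)	(((_map)->map_flags & SDEI_MAPF_BOUND) != 0)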
/* Indices of private and shared mappings */
#define _SDEI_MAP_IDX_PRIV 0
#define _SDEI_MAP_IDX_SHRD 1
#define _SDEI_MAP_IDX_MAX 2
/* The macros below are used to identify SDEI calls from the SMC function ID */
#define SDEI_FID_MASK U(0xffe0)
#define SDEI_FID_VALUE U(0x20)
#define is_sdei_fid(_fid) \
((((_fid) & SDEI_FID_MASK) == SDEI_FID_VALUE) && \
(((_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64))
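As a usage sketch, a runtime service dispatcher could use is_sdei_fid() to route SDEI calls to sdei_smc_handler(). The handler below is illustrative only; the function name is hypothetical, and SMC_RET1/SMC_UNK come from the generic runtime services framework rather than this patch:

/* Illustrative dispatcher fragment; not part of this change */
static uint64_t example_svc_smc_handler(uint32_t smc_fid, uint64_t x1,
		uint64_t x2, uint64_t x3, uint64_t x4,
		void *cookie, void *handle, uint64_t flags)
{
#if SDEI_SUPPORT
	/* Hand all SDEI function IDs over to the SDEI dispatcher */
	if (is_sdei_fid(smc_fid))
		return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
#endif
	SMC_RET1(handle, SMC_UNK);
}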
#define SDEI_EVENT_MAP(_event, _intr, _flags) \
{ \
.ev_num = _event, \
.intr = _intr, \
.map_flags = _flags \
}
#define SDEI_SHARED_EVENT(_event, _intr, _flags) \
SDEI_EVENT_MAP(_event, _intr, _flags)
#define SDEI_PRIVATE_EVENT(_event, _intr, _flags) \
SDEI_EVENT_MAP(_event, _intr, _flags | SDEI_MAPF_PRIVATE)
#define SDEI_DEFINE_EVENT_0(_intr) \
SDEI_PRIVATE_EVENT(SDEI_EVENT_0, _intr, SDEI_MAPF_SIGNALABLE)
/*
* Declare shared and private entries for each core, as well as a global
* structure containing both private and shared entries.
*
* This macro must be used in the same file in which the platform SDEI mappings
* are declared; only then will ARRAY_SIZE() yield a meaningful value.
*/
#define REGISTER_SDEI_MAP(_private, _shared) \
sdei_entry_t sdei_private_event_table \
[PLATFORM_CORE_COUNT * ARRAY_SIZE(_private)]; \
sdei_entry_t sdei_shared_event_table[ARRAY_SIZE(_shared)]; \
const sdei_mapping_t sdei_global_mappings[] = { \
[_SDEI_MAP_IDX_PRIV] = { \
.map = _private, \
.num_maps = ARRAY_SIZE(_private) \
}, \
[_SDEI_MAP_IDX_SHRD] = { \
.map = _shared, \
.num_maps = ARRAY_SIZE(_shared) \
}, \
}
typedef uint8_t sdei_state_t;
/* Runtime data of SDEI event */
typedef struct sdei_entry {
uint64_t ep; /* Entry point */
uint64_t arg; /* Entry point argument */
uint64_t affinity; /* Affinity of shared event */
unsigned int reg_flags; /* Registration flags */
/* Event handler states: registered, enabled, running */
sdei_state_t state;
} sdei_entry_t;
/* Mapping of SDEI events to interrupts, and associated data */
typedef struct sdei_ev_map {
int32_t ev_num; /* Event number */
unsigned int intr; /* Physical interrupt number for a bound map */
unsigned int map_flags; /* Mapping flags, see SDEI_MAPF_* */
unsigned int reg_count; /* Registration count */
spinlock_t lock; /* Per-event lock */
} sdei_ev_map_t;
typedef struct sdei_mapping {
sdei_ev_map_t *map;
size_t num_maps;
} sdei_mapping_t;
/* Handler for SDEI SMC calls */
uint64_t sdei_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
void *cookie,
void *handle,
uint64_t flags);
void sdei_init(void);
/* Public API to dispatch an event to Normal world */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state);
#endif /* __SDEI_H__ */
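A sketch of the explicit dispatch API above. Per the dispatch rules in the implementation, the event must be private, non-dynamic and unbound, and already registered by the client; PLAT_SDEI_FAULT_EVENT is a hypothetical event number, not one defined by this patch:

/* Hypothetical EL3 caller delegating a fault report to the SDEI client */
static void plat_report_fault_to_client(void)
{
	/* The current (Secure) context is assumed to have been saved */
	if (sdei_dispatch_event(PLAT_SDEI_FAULT_EVENT, SECURE) < 0)
		WARN("SDEI dispatch failed\n");
}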
@@ -62,6 +62,9 @@ ENABLE_RUNTIME_INSTRUMENTATION := 0
# Flag to enable stack corruption protection
ENABLE_STACK_PROTECTOR := 0
# Flag to enable exception handling in EL3
EL3_EXCEPTION_HANDLING := 0
# Build flag to treat usage of deprecated platform and framework APIs as error.
ERROR_DEPRECATED := 0
@@ -111,6 +114,9 @@ RESET_TO_BL31 := 0
# For Chain of Trust
SAVE_KEYS := 0
# Software Delegated Exception support
SDEI_SUPPORT := 0
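SDEI builds on the EL3 exception handling framework, so the two flags are normally enabled together at build time, for example (the platform choice here is illustrative):

make PLAT=fvp EL3_EXCEPTION_HANDLING=1 SDEI_SUPPORT=1 all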
# Whether code and read-only data should be put on separate memory pages. The
# platform Makefile is free to override this value.
SEPARATE_CODE_AND_RODATA := 0
@@ -398,7 +398,7 @@ plat_psci_ops_t plat_arm_psci_pm_ops = {
.system_off = fvp_system_off,
.system_reset = fvp_system_reset,
.validate_power_state = fvp_validate_power_state,
-	.validate_ns_entrypoint = arm_validate_ns_entrypoint,
+	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
.translate_power_state_by_mpidr = fvp_translate_power_state_by_mpidr,
.get_node_hw_state = fvp_node_hw_state,
.get_sys_suspend_power_state = fvp_get_sys_suspend_power_state,
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <ehf.h>
#include <platform_def.h>
/*
* Enumeration of priority levels on ARM platforms.
*/
ehf_pri_desc_t arm_exceptions[] = {
#if SDEI_SUPPORT
/* Critical priority SDEI */
EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
/* Normal priority SDEI */
EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_NORMAL_PRI),
#endif
};
/* Plug in ARM exceptions to Exception Handling Framework. */
EHF_REGISTER_PRIORITIES(arm_exceptions, ARRAY_SIZE(arm_exceptions), ARM_PRI_BITS);
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/* SDEI configuration for ARM platforms */
#include <ehf.h>
#include <platform_def.h>
#include <sdei.h>
/* Private event mappings */
static sdei_ev_map_t arm_private_sdei[] = {
/* Event 0 */
SDEI_DEFINE_EVENT_0(ARM_SDEI_SGI),
/* Dynamic private events */
SDEI_PRIVATE_EVENT(ARM_SDEI_DP_EVENT_0, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_PRIVATE_EVENT(ARM_SDEI_DP_EVENT_1, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_PRIVATE_EVENT(ARM_SDEI_DP_EVENT_2, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
};
/* Shared event mappings */
static sdei_ev_map_t arm_shared_sdei[] = {
/* Dynamic shared events */
SDEI_SHARED_EVENT(ARM_SDEI_DS_EVENT_0, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_SHARED_EVENT(ARM_SDEI_DS_EVENT_1, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_SHARED_EVENT(ARM_SDEI_DS_EVENT_2, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
};
/* Export ARM SDEI events */
REGISTER_SDEI_MAP(arm_private_sdei, arm_shared_sdei);
@@ -204,3 +204,51 @@ unsigned int plat_get_syscnt_freq2(void)
}
#endif /* ARM_SYS_CNTCTL_BASE */
#if SDEI_SUPPORT
/*
* Translate SDEI entry point to PA, and perform standard ARM entry point
* validation on it.
*/
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
{
uint64_t par, pa;
uint32_t scr_el3;
/* Doing Non-secure address translation requires SCR_EL3.NS set */
scr_el3 = read_scr_el3();
write_scr_el3(scr_el3 | SCR_NS_BIT);
isb();
assert((client_mode == MODE_EL2) || (client_mode == MODE_EL1));
if (client_mode == MODE_EL2) {
/*
* Translate entry point to Physical Address using the EL2
* translation regime.
*/
ats1e2r(ep);
} else {
/*
* Translate entry point to Physical Address using the EL1&0
* translation regime, including stage 2.
*/
ats12e1r(ep);
}
isb();
par = read_par_el1();
/* Restore original SCR_EL3 */
write_scr_el3(scr_el3);
isb();
/* If the translation resulted in a fault, return failure */
if ((par & PAR_F_MASK) != 0)
return -1;
/* Extract Physical Address from PAR */
pa = (par & (PAR_ADDR_MASK << PAR_ADDR_SHIFT));
/* Perform NS entry point validation on the physical address */
return arm_validate_ns_entrypoint(pa);
}
#endif
@@ -184,6 +184,14 @@ BL31_SOURCES += plat/arm/common/arm_sip_svc.c \
lib/pmf/pmf_smc.c
endif
ifeq (${EL3_EXCEPTION_HANDLING},1)
BL31_SOURCES += plat/arm/common/aarch64/arm_ehf.c
endif
ifeq (${SDEI_SUPPORT},1)
BL31_SOURCES += plat/arm/common/aarch64/arm_sdei.c
endif
ifneq (${TRUSTED_BOARD_BOOT},0)
# Include common TBB sources
@@ -51,6 +51,7 @@ void plat_arm_gic_init(void)
{
gicv2_distif_init();
gicv2_pcpu_distif_init();
gicv2_set_pe_target_mask(plat_my_core_pos());
gicv2_cpuif_enable();
}
/*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -112,7 +112,7 @@ int arm_validate_power_state(unsigned int power_state,
/*******************************************************************************
* ARM standard platform handler called to check the validity of the non secure
- * entrypoint.
+ * entrypoint. Returns 0 if the entrypoint is valid, or -1 otherwise.
******************************************************************************/
int arm_validate_ns_entrypoint(uintptr_t entrypoint)
{
@@ -121,15 +121,23 @@ int arm_validate_ns_entrypoint(uintptr_t entrypoint)
* secure DRAM.
*/
	if ((entrypoint >= ARM_NS_DRAM1_BASE) && (entrypoint <
-			(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE)))
-		return PSCI_E_SUCCESS;
+			(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE))) {
+		return 0;
+	}
#ifndef AARCH32
	if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint <
-			(ARM_DRAM2_BASE + ARM_DRAM2_SIZE)))
-		return PSCI_E_SUCCESS;
+			(ARM_DRAM2_BASE + ARM_DRAM2_SIZE))) {
+		return 0;
+	}
#endif
-	return PSCI_E_INVALID_ADDRESS;
+	return -1;
}
int arm_validate_psci_entrypoint(uintptr_t entrypoint)
{
return arm_validate_ns_entrypoint(entrypoint) == 0 ? PSCI_E_SUCCESS :
PSCI_E_INVALID_ADDRESS;
}
/******************************************************************************
@@ -299,7 +299,7 @@ plat_psci_ops_t plat_arm_psci_pm_ops = {
.system_off = css_system_off,
.system_reset = css_system_reset,
.validate_power_state = css_validate_power_state,
-	.validate_ns_entrypoint = arm_validate_ns_entrypoint,
+	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
.get_node_hw_state = css_node_hw_state,
.get_sys_suspend_power_state = css_get_sys_suspend_power_state,
@@ -3,6 +3,8 @@
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <console.h>
#include <platform.h>
@@ -20,6 +22,11 @@
#pragma weak plat_get_syscnt_freq2
#endif /* ERROR_DEPRECATED */
#if SDEI_SUPPORT
#pragma weak plat_sdei_handle_masked_trigger
#pragma weak plat_sdei_validate_entry_point
#endif
void bl31_plat_enable_mmu(uint32_t flags)
{
enable_mmu_el3(flags);
@@ -64,3 +71,22 @@ unsigned int plat_get_syscnt_freq2(void)
return (unsigned int)freq;
}
#endif /* ERROR_DEPRECATED */
#if SDEI_SUPPORT
/*
* Function that handles spurious SDEI interrupts while events are masked.
*/
void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr)
{
WARN("Spurious SDEI interrupt %u on masked PE %lx\n", intr, mpidr);
}
/*
* Default function to validate the SDEI entry point; it simply returns success.
* Platforms may override this with their own validation mechanism.
*/
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
{
return 0;
}
#endif
@@ -277,3 +277,13 @@ unsigned int plat_ic_set_priority_mask(unsigned int mask)
{
return gicv2_set_pmr(mask);
}
unsigned int plat_ic_get_interrupt_id(unsigned int raw)
{
unsigned int id = (raw & INT_ID_MASK);
if (id == GIC_SPURIOUS_INTERRUPT)
id = INTR_ID_UNAVAILABLE;
return id;
}
@@ -271,6 +271,14 @@ unsigned int plat_ic_set_priority_mask(unsigned int mask)
{
return gicv3_set_pmr(mask);
}
unsigned int plat_ic_get_interrupt_id(unsigned int raw)
{
unsigned int id = (raw & INT_ID_MASK);
return (gicv3_is_intr_id_special_identifier(id) ?
INTR_ID_UNAVAILABLE : id);
}
#endif
#ifdef IMAGE_BL32
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <utils.h>
#include "sdei_private.h"
#define MAP_OFF(_map, _mapping) ((_map) - (_mapping)->map)
/*
* Get the SDEI entry for the given mapping: on success, returns a pointer to
* the SDEI entry. On error, returns NULL.
*
* Both shared and private maps are stored in a single-dimensional array.
* Private event entries are kept per PE, together forming a 2D array.
*/
sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
{
const sdei_mapping_t *mapping;
sdei_entry_t *cpu_priv_base;
unsigned int idx, base_idx;
if (is_event_private(map)) {
/*
* For a private map, find the index of the mapping in the
* array.
*/
mapping = SDEI_PRIVATE_MAPPING();
idx = MAP_OFF(map, mapping);
/* Base of private mappings for this CPU */
base_idx = plat_my_core_pos() * mapping->num_maps;
cpu_priv_base = &sdei_private_event_table[base_idx];
/*
* Return the address of the entry at the same index in the
* per-CPU event entry.
*/
return &cpu_priv_base[idx];
} else {
mapping = SDEI_SHARED_MAPPING();
idx = MAP_OFF(map, mapping);
return &sdei_shared_event_table[idx];
}
}
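To illustrate the indexing with hypothetical numbers: with 4 private maps and 8 cores, sdei_private_event_table holds 8 * 4 = 32 entries, and the entry for private map index 2 on core 3 sits at index 3 * 4 + 2 = 14 of that table.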
/*
* Find the event mapping for a given interrupt number: on success, returns a
* pointer to the event mapping. On error, returns NULL.
*/
sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
{
const sdei_mapping_t *mapping;
sdei_ev_map_t *map;
unsigned int i;
/*
* Look for a match in private and shared mappings, as requested. This
* is a linear search. However, if the mappings are required to be
* sorted, for large maps, we could consider binary search.
*/
mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
iterate_mapping(mapping, i, map) {
if (map->intr == intr_num)
return map;
}
return NULL;
}
/*
* Find the event mapping for a given event number: on success, returns a
* pointer to the event mapping. On error, returns NULL.
*/
sdei_ev_map_t *find_event_map(int ev_num)
{
const sdei_mapping_t *mapping;
sdei_ev_map_t *map;
unsigned int i, j;
/*
* Iterate through mappings to find a match. This is a linear search.
* However, if the mappings are required to be sorted, for large maps,
* we could consider binary search.
*/
for_each_mapping_type(i, mapping) {
iterate_mapping(mapping, j, map) {
if (map->ev_num == ev_num)
return map;
}
}
return NULL;
}
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"
#define PE_MASKED 1
#define PE_NOT_MASKED 0
/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS 18
/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING 2
/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state() (&sdei_cpu_state[plat_my_core_pos()])
/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
sdei_ev_map_t *map;
unsigned int sec_state;
unsigned int intr_raw;
uint64_t x[SDEI_SAVED_GPREGS];
/* Exception state registers */
uint64_t elr_el3;
uint64_t spsr_el3;
} sdei_dispatch_context_t;
/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
unsigned short stack_top; /* Empty ascending */
unsigned int pe_masked:1;
unsigned int pending_enables:1;
} sdei_cpu_state_t;
/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];
unsigned int sdei_pe_mask(void)
{
unsigned int ret;
sdei_cpu_state_t *state = sdei_get_this_pe_state();
/*
* Return value indicates whether this call had any effect on the mask
* status of this PE.
*/
ret = (state->pe_masked ^ PE_MASKED);
state->pe_masked = PE_MASKED;
return ret;
}
void sdei_pe_unmask(void)
{
int i;
sdei_ev_map_t *map;
sdei_entry_t *se;
sdei_cpu_state_t *state = sdei_get_this_pe_state();
uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
/*
* If there are pending enables, iterate through the private mappings
* and enable those bound maps that are in the enabled state. Also, iterate
* through the shared mappings and enable interrupts of events that are
* targeted at this PE.
*/
if (state->pending_enables) {
for_each_private_map(i, map) {
se = get_event_entry(map);
if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
plat_ic_enable_interrupt(map->intr);
}
for_each_shared_map(i, map) {
se = get_event_entry(map);
sdei_map_lock(map);
if (is_map_bound(map) &&
GET_EV_STATE(se, ENABLED) &&
(se->reg_flags == SDEI_REGF_RM_PE) &&
(se->affinity == my_mpidr)) {
plat_ic_enable_interrupt(map->intr);
}
sdei_map_unlock(map);
}
}
state->pending_enables = 0;
state->pe_masked = PE_NOT_MASKED;
}
/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
sdei_cpu_state_t *state = sdei_get_this_pe_state();
sdei_dispatch_context_t *disp_ctx;
/* Cannot have more than max events */
assert(state->stack_top < MAX_EVENT_NESTING);
disp_ctx = &state->dispatch_stack[state->stack_top];
state->stack_top++;
return disp_ctx;
}
/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
sdei_cpu_state_t *state = sdei_get_this_pe_state();
if (state->stack_top == 0)
return NULL;
assert(state->stack_top <= MAX_EVENT_NESTING);
state->stack_top--;
return &state->dispatch_stack[state->stack_top];
}
/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
sdei_cpu_state_t *state = sdei_get_this_pe_state();
if (state->stack_top == 0)
return NULL;
assert(state->stack_top <= MAX_EVENT_NESTING);
return &state->dispatch_stack[state->stack_top - 1];
}
static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
unsigned int intr_raw)
{
sdei_dispatch_context_t *disp_ctx;
gp_regs_t *tgt_gpregs;
el3_state_t *tgt_el3;
assert(tgt_ctx);
tgt_gpregs = get_gpregs_ctx(tgt_ctx);
tgt_el3 = get_el3state_ctx(tgt_ctx);
disp_ctx = push_dispatch();
assert(disp_ctx);
disp_ctx->sec_state = sec_state;
disp_ctx->map = map;
disp_ctx->intr_raw = intr_raw;
/* Save general purpose and exception registers */
memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
}
static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
gp_regs_t *tgt_gpregs;
el3_state_t *tgt_el3;
assert(tgt_ctx);
tgt_gpregs = get_gpregs_ctx(tgt_ctx);
tgt_el3 = get_el3state_ctx(tgt_ctx);
CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
foo);
/* Restore general purpose and exception registers */
memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
}
static void save_secure_context(void)
{
cm_el1_sysregs_context_save(SECURE);
}
/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
}
/*
* Restore Non-secure context and arrange to resume it at the next ERET. Return
* pointer to the Non-secure context.
*/
static cpu_context_t *restore_and_resume_ns_context(void)
{
cpu_context_t *ns_ctx;
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
ns_ctx = cm_get_context(NON_SECURE);
assert(ns_ctx);
return ns_ctx;
}
/*
* Populate the Non-secure context so that the next ERET will dispatch to the
* SDEI client.
*/
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
cpu_context_t *ctx, int sec_state_to_resume,
unsigned int intr_raw)
{
el3_state_t *el3_ctx = get_el3state_ctx(ctx);
/* Push the event and context */
save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);
/*
* Setup handler arguments:
*
* - x0: Event number
* - x1: Handler argument supplied at the time of event registration
* - x2: Interrupted PC
* - x3: Interrupted SPSR
*/
SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));
/*
* Prepare for ERET:
*
* - Set PC to the registered handler address
* - Set SPSR to jump to client EL with exceptions masked
*/
cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
SPSR_64(sdei_client_el(), MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS));
}
/* Handle an SDEI interrupt that triggered while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
sdei_cpu_state_t *state, unsigned int intr_raw)
{
uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
int disable = 0;
/* Nothing to do for event 0 */
if (map->ev_num == SDEI_EVENT_0)
return;
/*
* For a private event, or for a shared event specifically routed to this
* CPU, we disable the interrupt, leave it pending, and EOI it.
*/
if (is_event_private(map)) {
disable = 1;
} else if (se->reg_flags == SDEI_REGF_RM_PE) {
assert(se->affinity == my_mpidr);
disable = 1;
}
if (disable) {
plat_ic_disable_interrupt(map->intr);
plat_ic_set_interrupt_pending(map->intr);
plat_ic_end_of_interrupt(intr_raw);
state->pending_enables = 1;
return;
}
/*
* We just received a shared event with routing set to ANY PE. The
* interrupt can't be delegated on this PE as SDEI events are masked.
* However, because its routing mode is ANY, it is possible that the
* event can be delegated on any other PE that hasn't masked events.
* Therefore, we set the interrupt back to pending, giving other
* suitable PEs a chance to handle it.
*/
assert(plat_ic_is_spi(map->intr));
plat_ic_set_interrupt_pending(map->intr);
/*
* Leaving the same interrupt pending also means that the same interrupt
* can target this PE again as soon as this PE leaves EL3. Whether and
* how often that happens depends on the implementation of GIC.
*
* We therefore call a platform handler to resolve this situation.
*/
plat_sdei_handle_masked_trigger(my_mpidr, map->intr);
/* This PE is masked. We EOI the interrupt, as it can't be delegated */
plat_ic_end_of_interrupt(intr_raw);
}
/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
void *cookie)
{
sdei_entry_t *se;
cpu_context_t *ctx;
sdei_ev_map_t *map;
sdei_dispatch_context_t *disp_ctx;
unsigned int sec_state;
sdei_cpu_state_t *state;
uint32_t intr;
/*
* To handle an event, the following conditions must be true:
*
* 1. Event must be signalled
* 2. Event must be enabled
* 3. This PE must be a target PE for the event
* 4. PE must be unmasked for SDEI
* 5. If this is a normal event, no event must be running
* 6. If this is a critical event, no critical event must be running
*
* (1) and (2) are true when this function is running
* (3) is enforced in GIC by selecting the appropriate routing option
* (4) is satisfied by client calling PE_UNMASK
* (5) and (6) are enforced using interrupt priority (the RPR) in the GIC:
* - Normal SDEI events belong to the Normal SDEI priority class
* - Critical SDEI events belong to the Critical SDEI priority class
*
* The interrupt has already been acknowledged, and therefore is active,
* so no other PE can handle this event while we are at it.
*
* Find if this is an SDEI interrupt. There must be an event mapped to
* this interrupt.
*/
intr = plat_ic_get_interrupt_id(intr_raw);
map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
if (!map) {
ERROR("No SDEI map for interrupt %u\n", intr);
panic();
}
/*
* The received interrupt number must either correspond to event 0, or must
* be a bound interrupt.
*/
assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));
se = get_event_entry(map);
state = sdei_get_this_pe_state();
if (state->pe_masked == PE_MASKED) {
/*
* Interrupts received while this PE was masked can't be
* dispatched.
*/
SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
read_mpidr_el1());
if (is_event_shared(map))
sdei_map_lock(map);
handle_masked_trigger(map, se, state, intr_raw);
if (is_event_shared(map))
sdei_map_unlock(map);
return 0;
}
/* Insert load barrier for signalled SDEI event */
if (map->ev_num == SDEI_EVENT_0)
dmbld();
if (is_event_shared(map))
sdei_map_lock(map);
/* Assert that a shared event routed to this PE was configured accordingly */
if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
assert(se->affinity ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
}
if (!can_sdei_state_trans(se, DO_DISPATCH)) {
SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
map->ev_num, se->state);
/*
* If the event is registered, leave the interrupt pending so
* that it's delivered when the event is enabled.
*/
if (GET_EV_STATE(se, REGISTERED))
plat_ic_set_interrupt_pending(map->intr);
/*
* The interrupt was disabled or unregistered after the handler
* started executing, which means the interrupt is already disabled
* and we only need to EOI it.
*/
plat_ic_end_of_interrupt(intr_raw);
if (is_event_shared(map))
sdei_map_unlock(map);
return 0;
}
disp_ctx = get_outstanding_dispatch();
if (is_event_critical(map)) {
/*
* If this event is Critical, and if there's an outstanding
* dispatch, assert the latter is a Normal dispatch. Critical
* events can preempt an outstanding Normal event dispatch.
*/
if (disp_ctx)
assert(is_event_normal(disp_ctx->map));
} else {
/*
* If this event is Normal, assert that there are no outstanding
* dispatches. Normal events can't preempt any outstanding event
* dispatches.
*/
assert(disp_ctx == NULL);
}
sec_state = get_interrupt_src_ss(flags);
if (is_event_shared(map))
sdei_map_unlock(map);
SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
map->ev_num, sec_state, read_spsr_el3(),
read_elr_el3());
ctx = handle;
/*
* Check if we interrupted secure state. Perform a context switch so
* that we can delegate to NS.
*/
if (sec_state == SECURE) {
save_secure_context();
ctx = restore_and_resume_ns_context();
}
setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);
/*
* End of interrupt is done in sdei_event_complete, when the client
* signals completion.
*/
return 0;
}
/* Explicitly dispatch the given SDEI event */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state)
{
sdei_entry_t *se;
sdei_ev_map_t *map;
cpu_context_t *ctx;
sdei_dispatch_context_t *disp_ctx;
sdei_cpu_state_t *state;
/* Validate preempted security state */
	if ((preempted_sec_state != SECURE) && (preempted_sec_state != NON_SECURE))
return -1;
/* Can't dispatch if events are masked on this PE */
state = sdei_get_this_pe_state();
if (state->pe_masked == PE_MASKED)
return -1;
/* Event 0 can't be dispatched */
if (ev_num == SDEI_EVENT_0)
return -1;
/* Locate mapping corresponding to this event */
map = find_event_map(ev_num);
if (!map)
return -1;
/*
* Statically-bound or dynamic maps are dispatched only as a result of an
* interrupt, not upon explicit request.
*/
if (is_map_dynamic(map) || is_map_bound(map))
return -1;
/* The event must be private */
if (is_event_shared(map))
return -1;
/* Examine state of dispatch stack */
disp_ctx = get_outstanding_dispatch();
if (disp_ctx) {
/*
* There's an outstanding dispatch. If the outstanding dispatch
* is critical, no more dispatches are possible.
*/
if (is_event_critical(disp_ctx->map))
return -1;
/*
* If the outstanding dispatch is Normal, only critical events
* can be dispatched.
*/
if (is_event_normal(map))
return -1;
}
se = get_event_entry(map);
if (!can_sdei_state_trans(se, DO_DISPATCH))
return -1;
/* Activate the priority corresponding to the event being dispatched */
ehf_activate_priority(sdei_event_priority(map));
/*
* We assume the current context is SECURE, and that it's already been
* saved.
*/
ctx = restore_and_resume_ns_context();
/*
* The caller has effectively terminated execution. Record the preempted
* context so that it can be resumed later, when the event completes or
* completes-and-resumes.
*/
setup_ns_dispatch(map, se, ctx, preempted_sec_state, 0);
return 0;
}
int sdei_event_complete(int resume, uint64_t pc)
{
sdei_dispatch_context_t *disp_ctx;
sdei_entry_t *se;
sdei_ev_map_t *map;
cpu_context_t *ctx;
sdei_action_t act;
unsigned int client_el = sdei_client_el();
/* Return error if called without an active event */
disp_ctx = pop_dispatch();
if (!disp_ctx)
return SDEI_EDENY;
/* Validate resumption point */
if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
return SDEI_EDENY;
map = disp_ctx->map;
assert(map);
se = get_event_entry(map);
SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
map->ev_num, read_spsr_el3(), read_elr_el3());
if (is_event_shared(map))
sdei_map_lock(map);
act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
if (!can_sdei_state_trans(se, act)) {
if (is_event_shared(map))
sdei_map_unlock(map);
return SDEI_EDENY;
}
/*
* Restore the Non-secure context to its state when originally
* interrupted. Once done, it's up-to-date with the saved copy.
*/
ctx = cm_get_context(NON_SECURE);
restore_event_ctx(disp_ctx, ctx);
if (resume) {
/*
* Complete-and-resume call. Prepare the Non-secure context
* (currently active) for complete and resume.
*/
cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
/*
* Make it look as if a synchronous exception were taken at the
* supplied Non-secure resumption point. Populate SPSR and
* ELR_ELx so that an ERET from there works as expected.
*
* The assumption is that the client, if necessary, would have
* saved any live content in these registers before making this
* call.
*/
if (client_el == MODE_EL2) {
write_elr_el2(disp_ctx->elr_el3);
write_spsr_el2(disp_ctx->spsr_el3);
} else {
/* EL1 */
write_elr_el1(disp_ctx->elr_el3);
write_spsr_el1(disp_ctx->spsr_el3);
}
}
/*
* If the cause of dispatch originally interrupted the Secure world, and
* if Non-secure world wasn't allowed to preempt Secure execution,
* resume Secure.
*
* No need to save the Non-secure context ahead of a world switch: the
* Non-secure context was fully saved before dispatch, and has been
* returned to its pre-dispatch state.
*/
if ((disp_ctx->sec_state == SECURE) &&
(ehf_is_ns_preemption_allowed() == 0)) {
restore_and_resume_secure_context();
}
if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
/*
* The event was dispatched after receiving an SDEI interrupt. With
* the event handling completed, EOI the corresponding
* interrupt.
*/
plat_ic_end_of_interrupt(disp_ctx->intr_raw);
} else {
/*
* An unbound event must have been dispatched explicitly.
* Deactivate the priority level that was activated at the time
* of explicit dispatch.
*/
ehf_deactivate_priority(sdei_event_priority(map));
}
if (is_event_shared(map))
sdei_map_unlock(map);
return 0;
}
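For context, a Normal-world client handler finishes by issuing one of the completion SMCs defined in sdei.h, which lands in sdei_event_complete() above. A sketch, where smc64() is a hypothetical SMC shim and not an API introduced by this patch:

/* Hypothetical client-side epilogue of an SDEI event handler */
static void client_event_done(uint64_t interrupted_pc)
{
	/*
	 * Resume at the interrupted PC that was passed in x2 at handler
	 * entry; smc64() is assumed to issue an SMC64 call with the given
	 * function ID and argument.
	 */
	smc64(SDEI_EVENT_COMPLETE_AND_RESUME, interrupted_pc);
}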
int sdei_event_context(void *handle, unsigned int param)
{
sdei_dispatch_context_t *disp_ctx;
if (param >= SDEI_SAVED_GPREGS)
return SDEI_EINVAL;
/* Get outstanding dispatch on this CPU */
disp_ctx = get_outstanding_dispatch();
if (!disp_ctx)
return SDEI_EDENY;
assert(disp_ctx->map);
if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
return SDEI_EDENY;
/*
* No locking is required for the Running state, as this is the only CPU
* that can complete the event.
*/
return disp_ctx->x[param];
}