adam.huang / Arm Trusted Firmware / Commits

Commit 11dfe0b4 (unverified), authored Aug 22, 2018 by Dimitris Papastamos, committed by GitHub on Aug 22, 2018

Merge pull request #1532 from jeenu-arm/misra-fixes

MISRA fixes

Parents: 61e7c054, b634fa91

Changes: 18 files
bl31/ehf.c

@@ -18,6 +18,7 @@
 #include <interrupt_mgmt.h>
 #include <platform.h>
 #include <pubsub_events.h>
+#include <stdbool.h>

@@ -26,43 +27,44 @@
 /* For a valid handler, return the actual function pointer; otherwise, 0. */
 #define RAW_HANDLER(h) \
-	((ehf_handler_t) ((h & _EHF_PRI_VALID) ? (h & ~_EHF_PRI_VALID) : 0))
+	((ehf_handler_t) ((((h) & EHF_PRI_VALID_) != 0U) ? \
+		((h) & ~EHF_PRI_VALID_) : 0U))

-#define PRI_BIT(idx)		(((ehf_pri_bits_t) 1) << idx)
+#define PRI_BIT(idx)		(((ehf_pri_bits_t) 1u) << (idx))

 /*
  * Convert index into secure priority using the platform-defined priority bits
  * field.
  */
 #define IDX_TO_PRI(idx) \
-	((idx << (7 - exception_data.pri_bits)) & 0x7f)
+	((((unsigned) idx) << (7u - exception_data.pri_bits)) & 0x7fU)

 /* Check whether a given index is valid */
 #define IS_IDX_VALID(idx) \
-	((exception_data.ehf_priorities[idx].ehf_handler & _EHF_PRI_VALID) != 0)
+	((exception_data.ehf_priorities[idx].ehf_handler & EHF_PRI_VALID_) != 0U)

 /* Returns whether given priority is in secure priority range */
-#define IS_PRI_SECURE(pri)	((pri & 0x80) == 0)
+#define IS_PRI_SECURE(pri)	(((pri) & 0x80U) == 0U)

 /* To be defined by the platform */
 extern const ehf_priorities_t exception_data;

 /* Translate priority to the index in the priority array */
-static int pri_to_idx(unsigned int priority)
+static unsigned int pri_to_idx(unsigned int priority)
 {
-	int idx;
+	unsigned int idx;

 	idx = EHF_PRI_TO_IDX(priority, exception_data.pri_bits);
-	assert((idx >= 0) && (idx < exception_data.num_priorities));
+	assert(idx < exception_data.num_priorities);
 	assert(IS_IDX_VALID(idx));
 	return idx;
 }

 /* Return whether there are outstanding priority activation */
-static int has_valid_pri_activations(pe_exc_data_t *pe_data)
+static bool has_valid_pri_activations(pe_exc_data_t *pe_data)
 {
-	return pe_data->active_pri_bits != 0;
+	return pe_data->active_pri_bits != 0U;
 }

 static pe_exc_data_t *this_cpu_data(void)

@@ -80,7 +82,7 @@ static int get_pe_highest_active_idx(pe_exc_data_t *pe_data)
 		return EHF_INVALID_IDX;

 	/* Current priority is the right-most bit */
-	return __builtin_ctz(pe_data->active_pri_bits);
+	return (int) __builtin_ctz(pe_data->active_pri_bits);
 }

@@ -95,8 +97,8 @@ static int get_pe_highest_active_idx(pe_exc_data_t *pe_data)
 void ehf_activate_priority(unsigned int priority)
 {
-	int idx, cur_pri_idx;
-	unsigned int old_mask, run_pri;
+	int cur_pri_idx;
+	unsigned int old_mask, run_pri, idx;
 	pe_exc_data_t *pe_data = this_cpu_data();

@@ -118,7 +120,8 @@ void ehf_activate_priority(unsigned int priority)
 	cur_pri_idx = get_pe_highest_active_idx(pe_data);
 	idx = pri_to_idx(priority);
-	if ((cur_pri_idx != EHF_INVALID_IDX) && (idx >= cur_pri_idx)) {
+	if ((cur_pri_idx != EHF_INVALID_IDX) &&
+			(idx >= ((unsigned int) cur_pri_idx))) {
 		ERROR("Activation priority mismatch: req=0x%x current=0x%x\n",
 				priority, IDX_TO_PRI(cur_pri_idx));
 		panic();

@@ -144,7 +147,7 @@ void ehf_activate_priority(unsigned int priority)
 	 * restored after the last deactivation.
 	 */
 	if (cur_pri_idx == EHF_INVALID_IDX)
-		pe_data->init_pri_mask = old_mask;
+		pe_data->init_pri_mask = (uint8_t) old_mask;

 	EHF_LOG("activate prio=%d\n", get_pe_highest_active_idx(pe_data));
 }

@@ -161,9 +164,9 @@ void ehf_activate_priority(unsigned int priority)
 void ehf_deactivate_priority(unsigned int priority)
 {
-	int idx, cur_pri_idx;
+	int cur_pri_idx;
 	pe_exc_data_t *pe_data = this_cpu_data();
-	unsigned int old_mask, run_pri;
+	unsigned int old_mask, run_pri, idx;

@@ -184,21 +187,22 @@ void ehf_deactivate_priority(unsigned int priority)
 	cur_pri_idx = get_pe_highest_active_idx(pe_data);
 	idx = pri_to_idx(priority);
-	if ((cur_pri_idx == EHF_INVALID_IDX) || (idx != cur_pri_idx)) {
+	if ((cur_pri_idx == EHF_INVALID_IDX) ||
+			(idx != ((unsigned int) cur_pri_idx))) {
 		ERROR("Deactivation priority mismatch: req=0x%x current=0x%x\n",
 				priority, IDX_TO_PRI(cur_pri_idx));
 		panic();
 	}

 	/* Clear bit corresponding to highest priority */
-	pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1);
+	pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1u);

 	/*
 	 * Restore priority mask corresponding to the next priority, or the
 	 * one stashed earlier if there are no more to deactivate.
 	 */
-	idx = get_pe_highest_active_idx(pe_data);
-	if (idx == EHF_INVALID_IDX)
+	cur_pri_idx = get_pe_highest_active_idx(pe_data);
+	if (cur_pri_idx == EHF_INVALID_IDX)
 		old_mask = plat_ic_set_priority_mask(pe_data->init_pri_mask);
 	else
 		old_mask = plat_ic_set_priority_mask(priority);

@@ -231,16 +235,16 @@ static void *ehf_exited_normal_world(const void *arg)
 	/* If the running priority is in the secure range, do nothing */
 	run_pri = plat_ic_get_running_priority();
 	if (IS_PRI_SECURE(run_pri))
-		return 0;
+		return NULL;

 	/* Do nothing if there are explicit activations */
 	if (has_valid_pri_activations(pe_data))
-		return 0;
+		return NULL;

-	assert(pe_data->ns_pri_mask == 0);
+	assert(pe_data->ns_pri_mask == 0u);

 	pe_data->ns_pri_mask =
-		plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);
+		(uint8_t) plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);

 	/* The previous Priority Mask is not expected to be in secure range */
 	if (IS_PRI_SECURE(pe_data->ns_pri_mask)) {

@@ -252,7 +256,7 @@ static void *ehf_exited_normal_world(const void *arg)
 	EHF_LOG("Priority Mask: 0x%x => 0x%x\n", pe_data->ns_pri_mask,
 			GIC_HIGHEST_NS_PRIORITY);
-	return 0;
+	return NULL;
 }

@@ -274,18 +278,18 @@ static void *ehf_entering_normal_world(const void *arg)
 	/* If the running priority is in the secure range, do nothing */
 	run_pri = plat_ic_get_running_priority();
 	if (IS_PRI_SECURE(run_pri))
-		return 0;
+		return NULL;

 	/*
 	 * If there are explicit activations, do nothing. The Priority Mask will
 	 * be restored upon the last deactivation.
 	 */
 	if (has_valid_pri_activations(pe_data))
-		return 0;
+		return NULL;

 	/* Do nothing if we don't have a valid Priority Mask to restore */
-	if (pe_data->ns_pri_mask == 0)
-		return 0;
+	if (pe_data->ns_pri_mask == 0U)
+		return NULL;

 	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);

@@ -304,7 +308,7 @@ static void *ehf_entering_normal_world(const void *arg)
 	pe_data->ns_pri_mask = 0;
-	return 0;
+	return NULL;
 }

@@ -328,7 +332,7 @@ void ehf_allow_ns_preemption(uint64_t preempt_ret_code)
 	 * We should have been notified earlier of entering secure world, and
 	 * therefore have stashed the Non-secure priority mask.
 	 */
-	assert(pe_data->ns_pri_mask != 0);
+	assert(pe_data->ns_pri_mask != 0U);

 	/* Make sure no priority levels are active when requesting this */
 	if (has_valid_pri_activations(pe_data)) {

@@ -343,7 +347,7 @@ void ehf_allow_ns_preemption(uint64_t preempt_ret_code)
 	 * to populate it, the caller would find the correct return value.
 	 */
 	ns_ctx = cm_get_context(NON_SECURE);
-	assert(ns_ctx);
+	assert(ns_ctx != NULL);
 	write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code);
 	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);

@@ -376,7 +380,7 @@ unsigned int ehf_is_ns_preemption_allowed(void)
 	 */
 	if (has_valid_pri_activations(pe_data))
 		return 0;
-	if (pe_data->ns_pri_mask != 0)
+	if (pe_data->ns_pri_mask != 0U)
 		return 0;

 	return 1;

@@ -388,7 +392,9 @@ unsigned int ehf_is_ns_preemption_allowed(void)
 static uint64_t ehf_el3_interrupt_handler(uint32_t id, uint32_t flags,
 		void *handle, void *cookie)
 {
-	int pri, idx, intr, intr_raw, ret = 0;
+	int ret = 0;
+	uint32_t intr_raw;
+	unsigned int intr, pri, idx;
 	ehf_handler_t handler;

@@ -425,8 +431,9 @@ static uint64_t ehf_el3_interrupt_handler(uint32_t id, uint32_t flags,
 	/* Validate priority */
 	assert(pri == IDX_TO_PRI(idx));

-	handler = RAW_HANDLER(exception_data.ehf_priorities[idx].ehf_handler);
-	if (!handler) {
+	handler = (ehf_handler_t) RAW_HANDLER(
+			exception_data.ehf_priorities[idx].ehf_handler);
+	if (handler == NULL) {
 		ERROR("No EL3 exception handler for priority 0x%x\n",
 				IDX_TO_PRI(idx));
 		panic();

@@ -438,7 +445,7 @@ static uint64_t ehf_el3_interrupt_handler(uint32_t id, uint32_t flags,
 	 */
 	ret = handler(intr_raw, flags, handle, cookie);
-	return ret;
+	return (uint64_t) ret;
 }

@@ -450,21 +457,22 @@ void ehf_init(void)
 	int ret __unused;

 	/* Ensure EL3 interrupts are supported */
-	assert(plat_ic_has_interrupt_type(INTR_TYPE_EL3));
+	assert(plat_ic_has_interrupt_type(INTR_TYPE_EL3) != 0);

 	/*
 	 * Make sure that priority water mark has enough bits to represent the
 	 * whole priority array.
 	 */
-	assert(exception_data.num_priorities <= (sizeof(ehf_pri_bits_t) * 8));
+	assert(exception_data.num_priorities <= (sizeof(ehf_pri_bits_t) * 8U));

-	assert(exception_data.ehf_priorities);
+	assert(exception_data.ehf_priorities != NULL);

 	/*
 	 * Bit 7 of GIC priority must be 0 for secure interrupts. This means
 	 * platforms must use at least 1 of the remaining 7 bits.
 	 */
-	assert((exception_data.pri_bits >= 1) || (exception_data.pri_bits < 8));
+	assert((exception_data.pri_bits >= 1U) || (exception_data.pri_bits < 8U));

 	/* Route EL3 interrupts when in Secure and Non-secure. */
 	set_interrupt_rm_flag(flags, NON_SECURE);

@@ -484,13 +492,13 @@ void ehf_init(void)
 void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler)
 {
-	int idx;
+	unsigned int idx;

 	/* Sanity check for handler */
 	assert(handler != NULL);

 	/* Handler ought to be 4-byte aligned */
-	assert((((uintptr_t) handler) & 3) == 0);
+	assert((((uintptr_t) handler) & 3U) == 0U);

 	/* Ensure we register for valid priority */
 	idx = pri_to_idx(pri);

@@ -498,7 +506,7 @@ void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler)
 	assert(IDX_TO_PRI(idx) == pri);

 	/* Return failure if a handler was already registered */
-	if (exception_data.ehf_priorities[idx].ehf_handler != _EHF_NO_HANDLER) {
+	if (exception_data.ehf_priorities[idx].ehf_handler != EHF_NO_HANDLER_) {
 		ERROR("Handler already registered for priority 0x%x\n", pri);
 		panic();
 	}

@@ -508,7 +516,7 @@ void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler)
 	 * is 4-byte aligned, which is usually the case.
 	 */
 	exception_data.ehf_priorities[idx].ehf_handler =
-		(((uintptr_t) handler) | _EHF_PRI_VALID);
+		(((uintptr_t) handler) | EHF_PRI_VALID_);

 	EHF_LOG("register pri=0x%x handler=%p\n", pri, handler);
 }
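Most of these ehf.c hunks do the same two things: parenthesise macro parameters and make integer literals explicitly unsigned. For context, a minimal self-contained sketch of the precedence hazard that the parenthesisation removes (not part of the commit; the macro shapes are simplified from the ones in the diff):

	#include <assert.h>

	/* Shift-style macro without parameter parentheses (pre-fix shape). */
	#define IDX_TO_PRI_OLD(idx)	((idx << 4) & 0x7f)
	/* Post-fix shape: parameter parenthesised, literals unsigned. */
	#define IDX_TO_PRI_NEW(idx)	((((unsigned int) (idx)) << 4U) & 0x7fU)

	int main(void)
	{
		unsigned int i = 3U;

		/* Caller passes an expression, "i & 1". The old macro expands to
		 * ((i & 1 << 4) & 0x7f): '<<' binds tighter than '&', so this is
		 * (i & 16) & 0x7f, i.e. 0 for i == 3. */
		assert(IDX_TO_PRI_OLD(i & 1) == 0U);

		/* The fixed macro evaluates the argument first: ((i & 1) << 4) == 16. */
		assert(IDX_TO_PRI_NEW(i & 1) == 16U);
		return 0;
	}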
include/bl31/ehf.h

@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#ifndef __EHF_H__
-#define __EHF_H__
+#ifndef EHF_H
+#define EHF_H

 #ifndef __ASSEMBLY__

@@ -13,27 +13,27 @@
 #include <utils_def.h>

 /* Valid priorities set bit 0 of the priority handler. */
-#define _EHF_PRI_VALID	(((uintptr_t) 1) << 0)
+#define EHF_PRI_VALID_	(((uintptr_t) 1) << 0)

 /* Marker for no handler registered for a valid priority */
-#define _EHF_NO_HANDLER	(0 | _EHF_PRI_VALID)
+#define EHF_NO_HANDLER_	(0U | EHF_PRI_VALID_)

 /* Extract the specified number of top bits from 7 lower bits of priority */
 #define EHF_PRI_TO_IDX(pri, plat_bits) \
-	((pri & 0x7f) >> (7 - plat_bits))
+	((((unsigned) (pri)) & 0x7fu) >> (7u - (plat_bits)))

 /* Install exception priority descriptor at a suitable index */
 #define EHF_PRI_DESC(plat_bits, priority) \
 	[EHF_PRI_TO_IDX(priority, plat_bits)] = { \
-		.ehf_handler = _EHF_NO_HANDLER, \
+		.ehf_handler = EHF_NO_HANDLER_, \
 	}

 /* Macro for platforms to regiter its exception priorities */
 #define EHF_REGISTER_PRIORITIES(priorities, num, bits) \
 	const ehf_priorities_t exception_data = { \
-		.num_priorities = num, \
-		.ehf_priorities = priorities, \
-		.pri_bits = bits, \
+		.num_priorities = (num), \
+		.ehf_priorities = (priorities), \
+		.pri_bits = (bits), \
 	}

@@ -72,10 +72,10 @@ typedef struct ehf_pri_desc {
 	uintptr_t ehf_handler;
 } ehf_pri_desc_t;

-typedef struct ehf_priorities {
+typedef struct ehf_priority_type {
 	ehf_pri_desc_t *ehf_priorities;
 	unsigned int num_priorities;
-	int pri_bits;
+	unsigned int pri_bits;
 } ehf_priorities_t;

 void ehf_init(void);

@@ -87,4 +87,4 @@ unsigned int ehf_is_ns_preemption_allowed(void);
 #endif /* __ASSEMBLY__ */
-#endif /* __EHF_H__ */
+#endif /* EHF_H */
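The renames in this header (__EHF_H__ to EHF_H, _EHF_PRI_VALID to EHF_PRI_VALID_) follow from C's reserved-identifier rules: names that begin with an underscore at file scope, or contain a double underscore anywhere, belong to the implementation. A small sketch of the resulting convention, assuming only <stdint.h>:

	#ifndef EHF_H		/* plain guard, no leading or double underscores */
	#define EHF_H

	#include <stdint.h>

	/* Internal macros carry a trailing underscore instead of a leading one. */
	#define EHF_PRI_VALID_		(((uintptr_t) 1) << 0)
	#define EHF_NO_HANDLER_		(0U | EHF_PRI_VALID_)

	#endif /* EHF_H */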
include/bl31/interrupt_mgmt.h

@@ -61,10 +61,10 @@
 #define INTR_RM_FROM_SEC_SHIFT		SECURE		/* BIT[0] */
 #define INTR_RM_FROM_NS_SHIFT		NON_SECURE	/* BIT[1] */
 #define INTR_RM_FROM_FLAG_MASK		U(1)
-#define get_interrupt_rm_flag(flag, ss)	(((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \
-					 & INTR_RM_FROM_FLAG_MASK)
-#define set_interrupt_rm_flag(flag, ss)	(flag |= U(1) << ss)
-#define clr_interrupt_rm_flag(flag, ss)	(flag &= ~(U(1) << ss))
+#define get_interrupt_rm_flag(flag, ss) \
+	((((flag) >> INTR_RM_FLAGS_SHIFT) >> (ss)) & INTR_RM_FROM_FLAG_MASK)
+#define set_interrupt_rm_flag(flag, ss)	((flag) |= U(1) << (ss))
+#define clr_interrupt_rm_flag(flag, ss)	((flag) &= ~(U(1) << (ss)))

@@ -101,9 +101,9 @@
 #define INTR_SRC_SS_FLAG_SHIFT		U(0)	/* BIT[0] */
 #define INTR_SRC_SS_FLAG_MASK		U(1)
-#define set_interrupt_src_ss(flag, val)	(flag |= val << INTR_SRC_SS_FLAG_SHIFT)
-#define clr_interrupt_src_ss(flag)	(flag &= ~(U(1) << INTR_SRC_SS_FLAG_SHIFT))
-#define get_interrupt_src_ss(flag)	((flag >> INTR_SRC_SS_FLAG_SHIFT) & \
-					 INTR_SRC_SS_FLAG_MASK)
+#define set_interrupt_src_ss(flag, val)	((flag) |= (val) << INTR_SRC_SS_FLAG_SHIFT)
+#define clr_interrupt_src_ss(flag)	((flag) &= ~(U(1) << INTR_SRC_SS_FLAG_SHIFT))
+#define get_interrupt_src_ss(flag)	(((flag) >> INTR_SRC_SS_FLAG_SHIFT) & \
+					 INTR_SRC_SS_FLAG_MASK)

 #ifndef __ASSEMBLY__
include/common/param_header.h

-/* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. */
+/* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. */

@@ -23,7 +23,7 @@
 #define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
 	(_p)->h.type = (uint8_t)(_type); \
 	(_p)->h.version = (uint8_t)(_ver); \
-	(_p)->h.size = (uint16_t)sizeof(*_p); \
+	(_p)->h.size = (uint16_t)sizeof(*(_p)); \
 	(_p)->h.attr = (uint32_t)(_attr) ; \
 } while (0)
include/drivers/arm/gic_common.h

@@ -7,6 +7,8 @@
 #ifndef __GIC_COMMON_H__
 #define __GIC_COMMON_H__
+
+#include <utils_def.h>

@@ -34,10 +36,10 @@
 #define GIC_INTR_CFG_EDGE	(1 << 1)

 /* Constants to categorise priorities */
-#define GIC_HIGHEST_SEC_PRIORITY	0x0
-#define GIC_LOWEST_SEC_PRIORITY		0x7f
-#define GIC_HIGHEST_NS_PRIORITY		0x80
-#define GIC_LOWEST_NS_PRIORITY		0xfe	/* 0xff would disable all interrupts */
+#define GIC_HIGHEST_SEC_PRIORITY	U(0x00)
+#define GIC_LOWEST_SEC_PRIORITY		U(0x7f)
+#define GIC_HIGHEST_NS_PRIORITY		U(0x80)
+#define GIC_LOWEST_NS_PRIORITY		U(0xfe)	/* 0xff would disable all interrupts */
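The new #include <utils_def.h> exists so these priority constants can use U(), TF-A's helper for attaching an unsigned suffix to a literal. A rough stand-in for the idea (the real helper in utils_def.h also has an __ASSEMBLY__ path, so this is only an approximation):

	/* Simplified stand-in for U(): force the literal to be unsigned. */
	#define U(x)	(x##U)

	#define GIC_HIGHEST_NS_PRIORITY		U(0x80)	/* unsigned int, value 0x80 */

	/* Comparisons against unsigned priorities then stay within one signedness. */
	static inline unsigned int is_ns_priority(unsigned int pri)
	{
		return (pri >= GIC_HIGHEST_NS_PRIORITY) ? 1U : 0U;
	}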
include/lib/el3_runtime/aarch64/context.h

@@ -241,9 +241,9 @@ DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
 /*
  * Macros to access members of any of the above structures using their
  * offsets
  */
-#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> DWORD_SHIFT])
-#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> DWORD_SHIFT]) \
-					 = val)
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[(offset) >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[(offset) >> DWORD_SHIFT]) \
+					 = (uint64_t) (val))
include/lib/extensions/ras.h

@@ -4,10 +4,10 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#ifndef __RAS_COMMON__
-#define __RAS_COMMON__
+#ifndef RAS_COMMON
+#define RAS_COMMON

-#define ERR_HANDLER_VERSION	1
+#define ERR_HANDLER_VERSION	1U

 /* Error record access mechanism */
 #define ERR_ACCESS_SYSREG	0

@@ -20,18 +20,18 @@
  * are declared. Only then would ARRAY_SIZE() yield a meaningful value.
  */
 #define REGISTER_ERR_RECORD_INFO(_records) \
-	const struct err_record_mapping err_record_mapping = { \
-		.err_records = _records, \
+	const struct err_record_mapping err_record_mappings = { \
+		.err_records = (_records), \
 		.num_err_records = ARRAY_SIZE(_records), \
 	}

 /* Error record info iterator */
 #define for_each_err_record_info(_i, _info) \
-	for (_i = 0, _info = err_record_mapping.err_records; \
-		_i < err_record_mapping.num_err_records; \
-		_i++, _info++)
+	for ((_i) = 0, (_info) = err_record_mappings.err_records; \
+		(_i) < err_record_mappings.num_err_records; \
+		(_i)++, (_info)++)

-#define _ERR_RECORD_COMMON(_probe, _handler, _aux) \
+#define ERR_RECORD_COMMON_(_probe, _handler, _aux) \
 	.probe = _probe, \
 	.handler = _handler, \
 	.aux_data = _aux,

@@ -42,7 +42,7 @@
 	.sysreg.idx_start = _idx_start, \
 	.sysreg.num_idx = _num_idx, \
 	.access = ERR_ACCESS_SYSREG, \
-	_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	ERR_RECORD_COMMON_(_probe, _handler, _aux) \
 }

@@ -51,7 +51,7 @@
 	.memmap.base_addr = _base_addr, \
 	.memmap.size_num_k = _size_num_k, \
 	.access = ERR_ACCESS_MEMMAP, \
-	_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+	ERR_RECORD_COMMON_(_probe, _handler, _aux) \
 }

@@ -63,8 +63,8 @@
  * array is expected to be sorted in the increasing order of interrupt number.
  */
 #define REGISTER_RAS_INTERRUPTS(_array) \
-	const struct ras_interrupt_mapping ras_interrupt_mapping = { \
-		.intrs = _array, \
+	const struct ras_interrupt_mapping ras_interrupt_mappings = { \
+		.intrs = (_array), \
 		.num_intrs = ARRAY_SIZE(_array), \
 	}

@@ -165,8 +165,8 @@ struct ras_interrupt_mapping {
 	size_t num_intrs;
 };

-extern const struct err_record_mapping err_record_mapping;
-extern const struct ras_interrupt_mapping ras_interrupt_mapping;
+extern const struct err_record_mapping err_record_mappings;
+extern const struct ras_interrupt_mapping ras_interrupt_mappings;

@@ -196,4 +196,4 @@ int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
 void ras_init(void);

 #endif /* __ASSEMBLY__ */
-#endif /* __RAS_COMMON__ */
+#endif /* RAS_COMMON */
include/lib/extensions/ras_arch.h

@@ -11,28 +11,28 @@
 /*
  * Size of nodes implementing Standard Error Records - currently only 4k is
  * supported.
  */
-#define STD_ERR_NODE_SIZE_NUM_K	4
+#define STD_ERR_NODE_SIZE_NUM_K	4U

 /*
  * Individual register offsets within an error record in Standard Error Record
  * format when error records are accessed through memory-mapped registers.
  */
-#define ERR_FR(n)	(0x0 + (64 * (n)))
-#define ERR_CTLR(n)	(0x8 + (64 * (n)))
-#define ERR_STATUS(n)	(0x10 + (64 * (n)))
-#define ERR_ADDR(n)	(0x18 + (64 * (n)))
-#define ERR_MISC0(n)	(0x20 + (64 * (n)))
-#define ERR_MISC1(n)	(0x28 + (64 * (n)))
+#define ERR_FR(n)	(0x0ULL + (64ULL * (n)))
+#define ERR_CTLR(n)	(0x8ULL + (64ULL * (n)))
+#define ERR_STATUS(n)	(0x10ULL + (64ULL * (n)))
+#define ERR_ADDR(n)	(0x18ULL + (64ULL * (n)))
+#define ERR_MISC0(n)	(0x20ULL + (64ULL * (n)))
+#define ERR_MISC1(n)	(0x28ULL + (64ULL * (n)))

 /* Group Status Register (ERR_STATUS) offset */
 #define ERR_GSR(base, size_num_k, n) \
-	((base) + (0x380 * (size_num_k)) + (8 * (n)))
+	((base) + (0x380ULL * (size_num_k)) + (8ULL * (n)))

 /* Management register offsets */
 #define ERR_DEVID(base, size_num_k) \
-	((base) + ((0x400 * (size_num_k)) - 0x100) + 0xc8)
+	((base) + ((0x400ULL * (size_num_k)) - 0x100ULL) + 0xc8ULL)

-#define ERR_DEVID_MASK	0xffff
+#define ERR_DEVID_MASK	0xffffUL

 /* Standard Error Record status register fields */
 #define ERR_STATUS_AV_SHIFT	31

@@ -244,7 +244,8 @@ static inline uint64_t ser_get_misc1(uintptr_t base, unsigned int idx)
 static inline void ser_sys_select_record(unsigned int idx)
 {
-	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+	unsigned int max_idx __unused =
+		(unsigned int) read_erridr_el1() & ERRIDR_MASK;

 	assert(idx < max_idx);
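These register-offset macros now spell out ULL so that the whole offset expression is computed in unsigned long long rather than in int. A hedged sketch of the effect, using an invented base address purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Standard Error Record status-register offset, computed in unsigned
	 * long long as in the commit. */
	#define ERR_STATUS(n)	(0x10ULL + (64ULL * (n)))

	int main(void)
	{
		uintptr_t base = 0x2a400000UL;	/* illustrative MMIO base, not from the commit */
		unsigned int record = 3U;

		/* The addend already has an unsigned 64-bit type, so no signed int
		 * arithmetic or implicit narrowing is involved. */
		uint64_t addr = (uint64_t) base + ERR_STATUS(record);

		printf("ERR_STATUS(%u) lives at 0x%llx\n", record,
		       (unsigned long long) addr);
		return 0;
	}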
include/services/sdei.h

@@ -4,61 +4,56 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
-#ifndef __SDEI_H__
-#define __SDEI_H__
+#ifndef SDEI_H
+#define SDEI_H

 #include <spinlock.h>
 #include <utils_def.h>

 /* Range 0xC4000020 - 0xC400003F reserved for SDE 64bit smc calls */
-#define SDEI_VERSION			0xC4000020
-#define SDEI_EVENT_REGISTER		0xC4000021
+#define SDEI_VERSION			0xC4000020U
+#define SDEI_EVENT_REGISTER		0xC4000021U
 (each SMC function ID in this block, from SDEI_VERSION 0xC4000020 through
  SDEI_SHARED_RESET 0xC4000032, gains a U suffix in the same way)

 /* SDEI_EVENT_REGISTER flags */
-#define SDEI_REGF_RM_ANY	0
-#define SDEI_REGF_RM_PE		1
+#define SDEI_REGF_RM_ANY	0ULL
+#define SDEI_REGF_RM_PE		1ULL

 /* SDEI_EVENT_COMPLETE status flags */
-#define SDEI_EV_HANDLED	0
-#define SDEI_EV_FAILED	1
-
-/* SDE event status values in bit position */
-#define SDEI_STATF_REGISTERED		0
-#define SDEI_STATF_ENABLED		1
-#define SDEI_STATF_RUNNING		2
+#define SDEI_EV_HANDLED	0U
+#define SDEI_EV_FAILED	1U

 /* Internal: SDEI flag bit positions */
-#define _SDEI_MAPF_DYNAMIC_SHIFT	1
-#define _SDEI_MAPF_BOUND_SHIFT		2
-#define _SDEI_MAPF_SIGNALABLE_SHIFT	3
-#define _SDEI_MAPF_PRIVATE_SHIFT	4
-#define _SDEI_MAPF_CRITICAL_SHIFT	5
-#define _SDEI_MAPF_EXPLICIT_SHIFT	6
+#define SDEI_MAPF_DYNAMIC_SHIFT_	1U
+#define SDEI_MAPF_BOUND_SHIFT_		2U
+#define SDEI_MAPF_SIGNALABLE_SHIFT_	3U
+#define SDEI_MAPF_PRIVATE_SHIFT_	4U
+#define SDEI_MAPF_CRITICAL_SHIFT_	5U
+#define SDEI_MAPF_EXPLICIT_SHIFT_	6U

 /* SDEI event 0 */
 #define SDEI_EVENT_0	0

 /* Placeholder interrupt for dynamic mapping */
-#define SDEI_DYN_IRQ	0
+#define SDEI_DYN_IRQ	0U

@@ -80,20 +75,20 @@
  *
  * See also the is_map_bound() macro.
  */
-#define SDEI_MAPF_DYNAMIC	BIT(_SDEI_MAPF_DYNAMIC_SHIFT)
-#define SDEI_MAPF_BOUND		BIT(_SDEI_MAPF_BOUND_SHIFT)
-#define SDEI_MAPF_EXPLICIT	BIT(_SDEI_MAPF_EXPLICIT_SHIFT)
-#define SDEI_MAPF_SIGNALABLE	BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)
-#define SDEI_MAPF_PRIVATE	BIT(_SDEI_MAPF_PRIVATE_SHIFT)
+#define SDEI_MAPF_DYNAMIC	BIT(SDEI_MAPF_DYNAMIC_SHIFT_)
+#define SDEI_MAPF_BOUND		BIT(SDEI_MAPF_BOUND_SHIFT_)
+#define SDEI_MAPF_EXPLICIT	BIT(SDEI_MAPF_EXPLICIT_SHIFT_)
+#define SDEI_MAPF_SIGNALABLE	BIT(SDEI_MAPF_SIGNALABLE_SHIFT_)
+#define SDEI_MAPF_PRIVATE	BIT(SDEI_MAPF_PRIVATE_SHIFT_)

 #define SDEI_MAPF_NORMAL	0
-#define SDEI_MAPF_CRITICAL	BIT(_SDEI_MAPF_CRITICAL_SHIFT)
+#define SDEI_MAPF_CRITICAL	BIT(SDEI_MAPF_CRITICAL_SHIFT_)

 /* Indices of private and shared mappings */
-#define _SDEI_MAP_IDX_PRIV	0
-#define _SDEI_MAP_IDX_SHRD	1
-#define _SDEI_MAP_IDX_MAX	2
+#define SDEI_MAP_IDX_PRIV_	0U
+#define SDEI_MAP_IDX_SHRD_	1U
+#define SDEI_MAP_IDX_MAX_	2U

 /* The macros below are used to identify SDEI calls from the SMC function ID */
 #define SDEI_FID_MASK	U(0xffe0)

@@ -104,22 +99,22 @@
 #define SDEI_EVENT_MAP(_event, _intr, _flags) \
 	{ \
-		.ev_num = _event, \
-		.intr = _intr, \
-		.map_flags = _flags \
+		.ev_num = (_event), \
+		.intr = (_intr), \
+		.map_flags = (_flags) \
 	}

 #define SDEI_SHARED_EVENT(_event, _intr, _flags) \
 	SDEI_EVENT_MAP(_event, _intr, _flags)

 #define SDEI_PRIVATE_EVENT(_event, _intr, _flags) \
-	SDEI_EVENT_MAP(_event, _intr, _flags | SDEI_MAPF_PRIVATE)
+	SDEI_EVENT_MAP(_event, _intr, (_flags) | SDEI_MAPF_PRIVATE)

 #define SDEI_DEFINE_EVENT_0(_intr) \
-	SDEI_PRIVATE_EVENT(SDEI_EVENT_0, _intr, SDEI_MAPF_SIGNALABLE)
+	SDEI_PRIVATE_EVENT(SDEI_EVENT_0, (_intr), SDEI_MAPF_SIGNALABLE)

 #define SDEI_EXPLICIT_EVENT(_event, _pri) \
-	SDEI_EVENT_MAP(_event, 0, _pri | SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE)
+	SDEI_EVENT_MAP((_event), 0, (_pri) | SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE)

@@ -133,12 +128,12 @@
 		[PLATFORM_CORE_COUNT * ARRAY_SIZE(_private)]; \
 	sdei_entry_t sdei_shared_event_table[ARRAY_SIZE(_shared)]; \
 	const sdei_mapping_t sdei_global_mappings[] = { \
-		[_SDEI_MAP_IDX_PRIV] = { \
-			.map = _private, \
+		[SDEI_MAP_IDX_PRIV_] = { \
+			.map = (_private), \
 			.num_maps = ARRAY_SIZE(_private) \
 		}, \
-		[_SDEI_MAP_IDX_SHRD] = { \
-			.map = _shared, \
+		[SDEI_MAP_IDX_SHRD_] = { \
+			.map = (_shared), \
 			.num_maps = ARRAY_SIZE(_shared) \
 		}, \
 	}

@@ -185,4 +180,4 @@ void sdei_init(void);
 /* Public API to dispatch an event to Normal world */
 int sdei_dispatch_event(int ev_num);

-#endif /* __SDEI_H__ */
+#endif /* SDEI_H */
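The SMC function IDs in this header are hexadecimal constants above INT_MAX, so they already have an unsigned type; the added U suffix only makes that explicit, which is what MISRA's constant-suffix rule asks for. A tiny sketch of how such an ID is typically compared (the helper name is illustrative, not from the source):

	#define SDEI_VERSION	0xC4000020U	/* explicit unsigned, as in the header */

	/* Dispatching on an SMC function ID stays an unsigned comparison throughout. */
	static inline int is_sdei_version_call(unsigned int smc_fid)
	{
		return (smc_fid == SDEI_VERSION) ? 1 : 0;
	}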
lib/extensions/ras/ras_common.c

@@ -11,6 +11,7 @@
 #include <platform.h>
 #include <ras.h>
 #include <ras_arch.h>
+#include <stdbool.h>

 #ifndef PLAT_RAS_PRI
 # error Platform must define RAS priority value

@@ -20,15 +21,15 @@
 int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
 		void *handle, uint64_t flags)
 {
-	unsigned int i, n_handled = 0, ret;
-	int probe_data;
+	unsigned int i, n_handled = 0;
+	int probe_data, ret;
 	struct err_record_info *info;

 	const struct err_handler_data err_data = {
 		.version = ERR_HANDLER_VERSION,
 		.ea_reason = ea_reason,
 		.interrupt = 0,
-		.syndrome = syndrome,
+		.syndrome = (uint32_t) syndrome,
 		.flags = flags,
 		.cookie = cookie,
 		.handle = handle

@@ -39,7 +40,7 @@ int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
 		assert(info->handler != NULL);

 		/* Continue probing until the record group signals no error */
-		while (1) {
+		while (true) {
 			if (info->probe(info, &probe_data) == 0)
 				break;

@@ -52,20 +53,20 @@ int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
 		}
 	}

-	return (n_handled != 0);
+	return (n_handled != 0U) ? 1 : 0;
 }

 #if ENABLE_ASSERTIONS
 static void assert_interrupts_sorted(void)
 {
 	unsigned int i, last;
-	struct ras_interrupt *start = ras_interrupt_mapping.intrs;
+	struct ras_interrupt *start = ras_interrupt_mappings.intrs;

-	if (ras_interrupt_mapping.num_intrs == 0)
+	if (ras_interrupt_mappings.num_intrs == 0UL)
 		return;

 	last = start[0].intr_number;
-	for (i = 1; i < ras_interrupt_mapping.num_intrs; i++) {
+	for (i = 1; i < ras_interrupt_mappings.num_intrs; i++) {
 		assert(start[i].intr_number > last);
 		last = start[i].intr_number;
 	}

@@ -79,7 +80,7 @@ static void assert_interrupts_sorted(void)
 static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
 		void *handle, void *cookie)
 {
-	struct ras_interrupt *ras_inrs = ras_interrupt_mapping.intrs;
+	struct ras_interrupt *ras_inrs = ras_interrupt_mappings.intrs;
 	struct ras_interrupt *selected = NULL;
 	int start, end, mid, probe_data, ret __unused;

@@ -91,10 +92,10 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
 		.handle = handle
 	};

-	assert(ras_interrupt_mapping.num_intrs > 0);
+	assert(ras_interrupt_mappings.num_intrs > 0UL);

 	start = 0;
-	end = ras_interrupt_mapping.num_intrs;
+	end = (int) ras_interrupt_mappings.num_intrs;
 	while (start <= end) {
 		mid = ((end + start) / 2);
 		if (intr_raw == ras_inrs[mid].intr_number) {

@@ -114,14 +115,14 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
 		panic();
 	}

-	if (selected->err_record->probe) {
+	if (selected->err_record->probe != NULL) {
 		ret = selected->err_record->probe(selected->err_record, &probe_data);
 		assert(ret != 0);
 	}

 	/* Call error handler for the record group */
 	assert(selected->err_record->handler != NULL);
-	selected->err_record->handler(selected->err_record, probe_data,
+	(void) selected->err_record->handler(selected->err_record, probe_data,
 			&err_data);

 	return 0;
lib/extensions/ras/std_err_record.c

@@ -13,28 +13,29 @@
 int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
 {
-	int num_records, num_group_regs, i;
+	unsigned int num_records, num_group_regs, i;
 	uint64_t gsr;

-	assert(base != 0);
+	assert(base != 0UL);

 	/* Only 4K supported for now */
 	assert(size_num_k == STD_ERR_NODE_SIZE_NUM_K);

-	num_records = (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);
+	num_records = (unsigned int)
+		(mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);

 	/* A group register shows error status for 2^6 error records */
-	num_group_regs = (num_records >> 6) + 1;
+	num_group_regs = (num_records >> 6U) + 1U;

 	/* Iterate through group registers to find a record in error */
 	for (i = 0; i < num_group_regs; i++) {
 		gsr = mmio_read_64(ERR_GSR(base, size_num_k, i));
-		if (gsr == 0)
+		if (gsr == 0ULL)
 			continue;

 		/* Return the index of the record in error */
 		if (probe_data != NULL)
-			*probe_data = ((i << 6) + __builtin_ctz(gsr));
+			*probe_data = (((int) (i << 6U)) + __builtin_ctzll(gsr));
 		return 1;
 	}

@@ -49,13 +50,14 @@ int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
 int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data)
 {
-	int i;
+	unsigned int i;
 	uint64_t status;
-	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+	unsigned int max_idx __unused =
+		((unsigned int) read_erridr_el1()) & ERRIDR_MASK;

 	assert(idx_start < max_idx);
-	assert(check_u32_overflow(idx_start, num_idx) == 0);
-	assert((idx_start + num_idx - 1) < max_idx);
+	assert(check_u32_overflow(idx_start, num_idx));
+	assert((idx_start + num_idx - 1U) < max_idx);

 	for (i = 0; i < num_idx; i++) {
 		/* Select the error record */

@@ -65,9 +67,9 @@ int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_da
 		status = read_erxstatus_el1();

 		/* Check for valid field in status */
-		if (ERR_STATUS_GET_FIELD(status, V)) {
+		if (ERR_STATUS_GET_FIELD(status, V) != 0U) {
 			if (probe_data != NULL)
-				*probe_data = i;
+				*probe_data = (int) i;
 			return 1;
 		}
 	}
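One change in ser_probe_memmap() is more than a type cleanup: gsr is a 64-bit group-status value, and __builtin_ctz() only looks at an unsigned int, so records flagged in the upper 32 bits would be missed (and ctz of zero is undefined). A minimal sketch of the difference, using the GCC/Clang builtins:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t gsr = 1ULL << 40;	/* only a record above bit 31 is in error */

		/* The 64-bit builtin sees the real lowest set bit. */
		assert(__builtin_ctzll(gsr) == 40);

		/* __builtin_ctz((unsigned int) gsr) would be handed 0 here, which is
		 * undefined behaviour, so the narrow variant cannot be used on gsr. */
		return 0;
	}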
plat/arm/common/arm_sip_svc.c

-/* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved. */
+/* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. */

@@ -64,9 +64,9 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
 		/*
 		 * Pointers used in execution state switch are all 32 bits wide
 		 */
-		return arm_execution_state_switch(smc_fid, (uint32_t) x1,
-				(uint32_t) x2, (uint32_t) x3, (uint32_t) x4,
-				handle);
+		return (uintptr_t) arm_execution_state_switch(smc_fid,
+				(uint32_t) x1, (uint32_t) x2, (uint32_t) x3,
+				(uint32_t) x4, handle);
 	}

 	case ARM_SIP_SVC_CALL_COUNT:
plat/arm/common/execution_state_switch.c

@@ -11,6 +11,7 @@
 #include <plat_arm.h>
 #include <psci.h>
 #include <smccc_helpers.h>
+#include <stdbool.h>
 #include <string.h>
 #include <utils.h>

@@ -39,7 +40,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
 {
 	/* Execution state can be switched only if EL3 is AArch64 */
 #ifdef AARCH64
-	int caller_64, from_el2, el, endianness, thumb = 0;
+	bool caller_64, thumb = false, from_el2;
+	unsigned int el, endianness;
 	u_register_t spsr, pc, scr, sctlr;
 	entry_point_info_t ep;
 	cpu_context_t *ctx = (cpu_context_t *) handle;

@@ -50,7 +52,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
 	/*
 	 * Disallow state switch if any of the secondaries have been brought up.
 	 */
-	if (psci_secondaries_brought_up())
+	if (psci_secondaries_brought_up() != 0)
 		goto exec_denied;

 	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);

@@ -61,20 +63,20 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * If the call originated from AArch64, expect 32-bit pointers when
 		 * switching to AArch32.
 		 */
-		if ((pc_hi != 0) || (cookie_hi != 0))
+		if ((pc_hi != 0U) || (cookie_hi != 0U))
 			goto invalid_param;

 		pc = pc_lo;

 		/* Instruction state when entering AArch32 */
-		thumb = pc & 1;
+		thumb = (pc & 1U) != 0U;
 	} else {
 		/* Construct AArch64 PC */
 		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
 	}

 	/* Make sure PC is 4-byte aligned, except for Thumb */
-	if ((pc & 0x3) && !thumb)
+	if (((pc & 0x3U) != 0U) && !thumb)
 		goto invalid_param;

@@ -95,7 +97,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * Disallow switching state if there's a Hypervisor in place;
 		 * this request must be taken up with the Hypervisor instead.
 		 */
-		if (scr & SCR_HCE_BIT)
+		if ((scr & SCR_HCE_BIT) != 0U)
 			goto exec_denied;
 	}

@@ -105,11 +107,11 @@ int arm_execution_state_switch(unsigned int smc_fid,
 	 * directly.
 	 */
 	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
-	endianness = !!(sctlr & SCTLR_EE_BIT);
+	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;

 	/* Construct SPSR for the exception state we're about to switch to */
 	if (caller_64) {
-		int impl;
+		unsigned long long impl;

@@ -121,7 +123,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		/* Return to the equivalent AArch32 privilege level */
 		el = from_el2 ? MODE32_hyp : MODE32_svc;
-		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
+		spsr = SPSR_MODE32((u_register_t) el,
+				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
 				endianness, DISABLE_ALL_EXCEPTIONS);
 	} else {

@@ -130,7 +133,8 @@ int arm_execution_state_switch(unsigned int smc_fid,
 		 * raised), it's safe to assume AArch64 is also implemented.
 		 */
 		el = from_el2 ? MODE_EL2 : MODE_EL1;
-		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS);
 	}

@@ -143,10 +147,11 @@ int arm_execution_state_switch(unsigned int smc_fid,
 	 */
 	zeromem(&ep, sizeof(ep));
 	ep.pc = pc;
-	ep.spsr = spsr;
+	ep.spsr = (uint32_t) spsr;
 	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
-		((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
-		 EP_ST_DISABLE));
+		((unsigned int) ((endianness != 0U) ? EP_EE_BIG : EP_EE_LITTLE)
+		 | NON_SECURE | EP_ST_DISABLE));

 	/*
 	 * Re-initialize the system register context, and exit EL3 as if for the
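The state-switch changes replace int flags produced by raw bit tests (thumb = pc & 1; if (scr & SCR_HCE_BIT)) with bools produced by explicit "!= 0U" comparisons, which is the usual way to satisfy MISRA's essentially-Boolean rules. A short sketch of the same pattern in isolation (helper names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	/* Derive the flag from an explicit comparison and store a real bool. */
	static bool is_thumb_entry(uint64_t pc)
	{
		return (pc & 1U) != 0U;
	}

	/* Mirror of the diff's alignment check: ARM-state AArch32 entry points must
	 * be 4-byte aligned; Thumb entry points only carry bit 0 as a mode marker. */
	static bool entry_pc_is_valid(uint64_t pc)
	{
		bool thumb = is_thumb_entry(pc);

		return !(((pc & 0x3U) != 0U) && !thumb);
	}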
services/std_svc/sdei/sdei_event.c

-/* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. */
+/* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. */

@@ -21,7 +21,8 @@ sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
 {
 	const sdei_mapping_t *mapping;
 	sdei_entry_t *cpu_priv_base;
-	unsigned int idx, base_idx;
+	unsigned int base_idx;
+	long int idx;

 	if (is_event_private(map)) {

@@ -32,7 +33,7 @@ sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
 		idx = MAP_OFF(map, mapping);

 		/* Base of private mappings for this CPU */
-		base_idx = plat_my_core_pos() * mapping->num_maps;
+		base_idx = plat_my_core_pos() * ((unsigned int) mapping->num_maps);
 		cpu_priv_base = &sdei_private_event_table[base_idx];

@@ -52,7 +53,7 @@ sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
 /*
  * Find event mapping for a given interrupt number: On success, returns pointer
  * to the event mapping. On error, returns NULL.
  */
-sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
+sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared)
 {
 	const sdei_mapping_t *mapping;
 	sdei_ev_map_t *map;
services/std_svc/sdei/sdei_intr_mgmt.c

@@ -16,17 +16,14 @@
 #include <string.h>
 #include "sdei_private.h"

-#define PE_MASKED	1
-#define PE_NOT_MASKED	0
-
 /* x0-x17 GPREGS context */
-#define SDEI_SAVED_GPREGS	18
+#define SDEI_SAVED_GPREGS	18U

 /* Maximum preemption nesting levels: Critical priority and Normal priority */
-#define MAX_EVENT_NESTING	2
+#define MAX_EVENT_NESTING	2U

 /* Per-CPU SDEI state access macro */
-#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])
+#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

@@ -48,31 +45,33 @@ typedef struct sdei_dispatch_context {
 typedef struct sdei_cpu_state {
 	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
 	unsigned short stack_top;	/* Empty ascending */
-	unsigned int pe_masked:1;
-	unsigned int pending_enables:1;
+	bool pe_masked;
+	bool pending_enables;
 } sdei_cpu_state_t;

 /* SDEI states for all cores in the system */
-static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];
+static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

-unsigned int sdei_pe_mask(void)
+int64_t sdei_pe_mask(void)
 {
-	unsigned int ret;
+	int64_t ret = 0;
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

 	/*
 	 * Return value indicates whether this call had any effect in the mask
 	 * status of this PE.
 	 */
-	ret = (state->pe_masked ^ PE_MASKED);
-	state->pe_masked = PE_MASKED;
+	if (!state->pe_masked) {
+		state->pe_masked = true;
+		ret = 1;
+	}

 	return ret;
 }

 void sdei_pe_unmask(void)
 {
-	int i;
+	unsigned int i;
 	sdei_ev_map_t *map;
 	sdei_entry_t *se;
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

@@ -95,8 +94,7 @@ void sdei_pe_unmask(void)
 			se = get_event_entry(map);
 			sdei_map_lock(map);
 			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
 					(se->reg_flags == SDEI_REGF_RM_PE) &&
 					(se->affinity == my_mpidr)) {
 				plat_ic_enable_interrupt(map->intr);

@@ -105,8 +103,8 @@ void sdei_pe_unmask(void)
 		}
 	}

-	state->pending_enables = 0;
-	state->pe_masked = PE_NOT_MASKED;
+	state->pending_enables = false;
+	state->pe_masked = false;
 }

@@ -129,7 +127,7 @@ static sdei_dispatch_context_t *pop_dispatch(void)
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

-	if (state->stack_top == 0)
+	if (state->stack_top == 0U)
 		return NULL;

 	assert(state->stack_top <= MAX_EVENT_NESTING);

@@ -144,27 +142,27 @@ static sdei_dispatch_context_t *get_outstanding_dispatch(void)
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

-	if (state->stack_top == 0)
+	if (state->stack_top == 0U)
 		return NULL;

 	assert(state->stack_top <= MAX_EVENT_NESTING);
-	return &state->dispatch_stack[state->stack_top - 1];
+	return &state->dispatch_stack[state->stack_top - 1U];
 }

 static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx)
 {
 	sdei_dispatch_context_t *disp_ctx;
-	gp_regs_t *tgt_gpregs;
-	el3_state_t *tgt_el3;
+	const gp_regs_t *tgt_gpregs;
+	const el3_state_t *tgt_el3;

-	assert(tgt_ctx);
+	assert(tgt_ctx != NULL);
 	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
 	tgt_el3 = get_el3state_ctx(tgt_ctx);

 	disp_ctx = push_dispatch();
-	assert(disp_ctx);
+	assert(disp_ctx != NULL);
 	disp_ctx->map = map;

 	/* Save general purpose and exception registers */

@@ -175,12 +173,12 @@ static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
 	return disp_ctx;
 }

-static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
+static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
 {
 	gp_regs_t *tgt_gpregs;
 	el3_state_t *tgt_el3;

-	assert(tgt_ctx);
+	assert(tgt_ctx != NULL);
 	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
 	tgt_el3 = get_el3state_ctx(tgt_ctx);

@@ -226,7 +224,7 @@ static cpu_context_t *restore_and_resume_ns_context(void)
 	cm_set_next_eret_context(NON_SECURE);

 	ns_ctx = cm_get_context(NON_SECURE);
-	assert(ns_ctx);
+	assert(ns_ctx != NULL);

 	return ns_ctx;
 }

@@ -251,7 +249,7 @@ static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
 	 * - x2: Interrupted PC
 	 * - x3: Interrupted SPSR
 	 */
-	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
+	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
 	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
 	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
 	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

@@ -286,7 +284,7 @@ static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
 		sdei_cpu_state_t *state, unsigned int intr_raw)
 {
 	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
-	int disable = 0;
+	bool disable = false;

 	/* Nothing to do for event 0 */
 	if (map->ev_num == SDEI_EVENT_0)

@@ -297,18 +295,17 @@ static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
 	 * this CPU, we disable interrupt, leave the interrupt pending, and do
 	 * EOI.
 	 */
-	if (is_event_private(map)) {
-		disable = 1;
-	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
-		assert(se->affinity == my_mpidr);
-		disable = 1;
-	}
+	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
+		disable = true;
+
+	if (se->reg_flags == SDEI_REGF_RM_PE)
+		assert(se->affinity == my_mpidr);

 	if (disable) {
 		plat_ic_disable_interrupt(map->intr);
 		plat_ic_set_interrupt_pending(map->intr);
 		plat_ic_end_of_interrupt(intr_raw);
-		state->pending_enables = 1;
+		state->pending_enables = true;

 		return;
 	}

@@ -321,7 +318,7 @@ static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
 	 * Therefore, we set the interrupt back pending so as to give other
 	 * suitable PEs a chance of handling it.
 	 */
-	assert(plat_ic_is_spi(map->intr));
+	assert(plat_ic_is_spi(map->intr) != 0);
 	plat_ic_set_interrupt_pending(map->intr);

@@ -344,11 +341,12 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	sdei_entry_t *se;
 	cpu_context_t *ctx;
 	sdei_ev_map_t *map;
-	sdei_dispatch_context_t *disp_ctx;
+	const sdei_dispatch_context_t *disp_ctx;
 	unsigned int sec_state;
 	sdei_cpu_state_t *state;
 	uint32_t intr;
+	const uint64_t mpidr = read_mpidr_el1();

 	/*
 	 * To handle an event, the following conditions must be true:

@@ -374,8 +372,8 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	 * this interrupt
 	 */
 	intr = plat_ic_get_interrupt_id(intr_raw);
-	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
-	if (!map) {
+	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
+	if (map == NULL) {
 		ERROR("No SDEI map for interrupt %u\n", intr);
 		panic();
 	}

@@ -389,13 +387,13 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	se = get_event_entry(map);
 	state = sdei_get_this_pe_state();

-	if (state->pe_masked == PE_MASKED) {
+	if (state->pe_masked) {
 		/*
 		 * Interrupts received while this PE was masked can't be
 		 * dispatched.
 		 */
-		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
-				read_mpidr_el1());
+		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
+				mpidr);
 		if (is_event_shared(map))
 			sdei_map_lock(map);

@@ -416,8 +414,7 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	/* Assert shared event routed to this PE had been configured so */
 	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
-		assert(se->affinity == (read_mpidr_el1() & MPIDR_AFFINITY_MASK));
+		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
 	}

 	if (!can_sdei_state_trans(se, DO_DISPATCH)) {

@@ -451,7 +448,7 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 		 * dispatch, assert the latter is a Normal dispatch. Critical
 		 * events can preempt an outstanding Normal event dispatch.
 		 */
-		if (disp_ctx)
+		if (disp_ctx != NULL)
 			assert(is_event_normal(disp_ctx->map));
 	} else {

@@ -467,9 +464,8 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	if (is_event_shared(map))
 		sdei_map_unlock(map);

-	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
-			map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());
+	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr,
+			map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());

 	ctx = handle;

@@ -497,7 +493,7 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	 * Non-secure context was fully saved before dispatch, and has been
 	 * returned to its pre-dispatch state.
 	 */
-	if ((sec_state == SECURE) && (ehf_is_ns_preemption_allowed() == 0))
+	if ((sec_state == SECURE) && (ehf_is_ns_preemption_allowed() == 0U))
 		restore_and_resume_secure_context();

@@ -511,9 +507,6 @@ int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
 	}

 	plat_ic_end_of_interrupt(intr_raw);

-	if (is_event_shared(map))
-		sdei_map_unlock(map);
-
 	return 0;
 }

@@ -539,7 +532,7 @@ int sdei_dispatch_event(int ev_num)
 	/* Can't dispatch if events are masked on this PE */
 	state = sdei_get_this_pe_state();
-	if (state->pe_masked == PE_MASKED)
+	if (state->pe_masked)
 		return -1;

 	/* Event 0 can't be dispatched */

@@ -548,7 +541,7 @@ int sdei_dispatch_event(int ev_num)
 	/* Locate mapping corresponding to this event */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return -1;

 	/* Only explicit events can be dispatched */

@@ -557,7 +550,7 @@ int sdei_dispatch_event(int ev_num)
 	/* Examine state of dispatch stack */
 	disp_ctx = get_outstanding_dispatch();
-	if (disp_ctx) {
+	if (disp_ctx != NULL) {
 		/*
 		 * There's an outstanding dispatch. If the outstanding dispatch
 		 * is critical, no more dispatches are possible.

@@ -606,7 +599,7 @@ static void end_sdei_synchronous_dispatch(struct jmpbuf *buffer)
 	longjmp(buffer);
 }

-int sdei_event_complete(int resume, uint64_t pc)
+int sdei_event_complete(bool resume, uint64_t pc)
 {
 	sdei_dispatch_context_t *disp_ctx;
 	sdei_entry_t *se;

@@ -617,7 +610,7 @@ int sdei_event_complete(int resume, uint64_t pc)
 	/* Return error if called without an active event */
 	disp_ctx = get_outstanding_dispatch();
-	if (!disp_ctx)
+	if (disp_ctx == NULL)
 		return SDEI_EDENY;

 	/* Validate resumption point */

@@ -625,9 +618,12 @@ int sdei_event_complete(int resume, uint64_t pc)
 		return SDEI_EDENY;

 	map = disp_ctx->map;
-	assert(map);
+	assert(map != NULL);
 	se = get_event_entry(map);

 	if (is_event_shared(map))
 		sdei_map_lock(map);

 	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
 	if (!can_sdei_state_trans(se, act)) {
 		if (is_event_shared(map))

@@ -635,15 +631,15 @@ int sdei_event_complete(int resume, uint64_t pc)
 		return SDEI_EDENY;
 	}

 	if (is_event_shared(map))
 		sdei_map_unlock(map);

 	/* Having done sanity checks, pop dispatch */
-	pop_dispatch();
+	(void) pop_dispatch();

 	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
 			map->ev_num, read_spsr_el3(), read_elr_el3());

 	if (is_event_shared(map))
 		sdei_map_lock(map);

 	/*
 	 * Restore Non-secure to how it was originally interrupted. Once done,
 	 * it's up-to-date with the saved copy.

@@ -684,7 +680,7 @@ int sdei_event_complete(int resume, uint64_t pc)
 	return 0;
 }

-int sdei_event_context(void *handle, unsigned int param)
+int64_t sdei_event_context(void *handle, unsigned int param)
 {
 	sdei_dispatch_context_t *disp_ctx;

@@ -693,10 +689,10 @@ int sdei_event_context(void *handle, unsigned int param)
 	/* Get outstanding dispatch on this CPU */
 	disp_ctx = get_outstanding_dispatch();
-	if (!disp_ctx)
+	if (disp_ctx == NULL)
 		return SDEI_EDENY;

-	assert(disp_ctx->map);
+	assert(disp_ctx->map != NULL);

 	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
 		return SDEI_EDENY;

@@ -706,5 +702,5 @@ int sdei_event_context(void *handle, unsigned int param)
 	 * which can complete the event
 	 */
-	return disp_ctx->x[param];
+	return (int64_t) disp_ctx->x[param];
 }
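The sdei_pe_mask() rewrite above drops the PE_MASKED constants and the XOR trick; with the state now a bool, the return value ("did this call change anything?") falls out of a plain test. A reduced sketch of the new shape, with the per-CPU state collapsed to one static purely for brevity:

	#include <stdbool.h>
	#include <stdint.h>

	static bool pe_masked;	/* stand-in for the per-CPU cpu_state[] entry */

	/* Returns 1 only on the transition from unmasked to masked, 0 otherwise,
	 * matching the old (pe_masked ^ PE_MASKED) result. */
	static int64_t pe_mask(void)
	{
		int64_t ret = 0;

		if (!pe_masked) {
			pe_masked = true;
			ret = 1;
		}
		return ret;
	}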
services/std_svc/sdei/sdei_main.c
View file @
11dfe0b4
...
...
@@ -22,14 +22,12 @@
#include <utils.h>
#include "sdei_private.h"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#define VENDOR_VERSION 0
#define MAJOR_VERSION 1
ULL
#define MINOR_VERSION 0
ULL
#define VENDOR_VERSION 0
ULL
#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
((((unsigned long long)(_major)) << 48) | \
(((unsigned long long)(_minor)) << 32) | \
(_vendor))
((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))
#define LOWEST_INTR_PRIORITY 0xff
...
...
@@ -47,7 +45,7 @@ static void init_map(sdei_ev_map_t *map)
}
/* Convert mapping to SDEI class */
sdei_class_t
map_to_class
(
sdei_ev_map_t
*
map
)
static
sdei_class_t
map_to_class
(
sdei_ev_map_t
*
map
)
{
return
is_event_critical
(
map
)
?
SDEI_CRITICAL
:
SDEI_NORMAL
;
}
...
...
@@ -64,7 +62,7 @@ static void clear_event_entries(sdei_entry_t *se)
/* Perform CPU-specific state initialisation */
static
void
*
sdei_cpu_on_init
(
const
void
*
arg
)
{
int
i
;
unsigned
int
i
;
sdei_ev_map_t
*
map
;
sdei_entry_t
*
se
;
...
...
@@ -78,15 +76,16 @@ static void *sdei_cpu_on_init(const void *arg)
SDEI_LOG
(
"Private events initialized on %lx
\n
"
,
read_mpidr_el1
());
/* All PEs start with SDEI events masked */
sdei_pe_mask
();
(
void
)
sdei_pe_mask
();
return
0
;
return
NULL
;
}
/* Initialise an SDEI class */
void
sdei_class_init
(
sdei_class_t
class
)
static
void
sdei_class_init
(
sdei_class_t
class
)
{
unsigned
int
i
,
zero_found
__unused
=
0
;
unsigned
int
i
;
bool
zero_found
__unused
=
false
;
int
ev_num_so_far
__unused
;
sdei_ev_map_t
*
map
;
...
...
@@ -126,7 +125,7 @@ void sdei_class_init(sdei_class_t class)
num_dyn_shrd_slots
++
;
}
else
{
/* Shared mappings must be bound to shared interrupt */
assert
(
plat_ic_is_spi
(
map
->
intr
));
assert
(
plat_ic_is_spi
(
map
->
intr
)
!=
0
);
set_map_bound
(
map
);
}
...
...
@@ -143,7 +142,7 @@ void sdei_class_init(sdei_class_t class)
ev_num_so_far
=
map
->
ev_num
;
if
(
map
->
ev_num
==
SDEI_EVENT_0
)
{
zero_found
=
1
;
zero_found
=
true
;
/* Event 0 must be a Secure SGI */
assert
(
is_secure_sgi
(
map
->
intr
));
...
...
@@ -197,7 +196,7 @@ void sdei_class_init(sdei_class_t class)
* Private mappings must be bound to private
* interrupt.
*/
assert
(
plat_ic_is_ppi
(
map
->
intr
));
assert
(
plat_ic_is_ppi
(
(
unsigned
)
map
->
intr
)
!=
0
);
set_map_bound
(
map
);
}
}
...
...
@@ -208,7 +207,7 @@ void sdei_class_init(sdei_class_t class)
/* Ensure event 0 is in the mapping */
assert
(
zero_found
);
sdei_cpu_on_init
(
NULL
);
(
void
)
sdei_cpu_on_init
(
NULL
);
}
/* SDEI dispatcher initialisation */
...
...
@@ -236,7 +235,7 @@ static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
se
->
reg_flags
=
flags
;
}
static
u
nsigned
long
long
sdei_version
(
void
)
static
u
int64_t
sdei_version
(
void
)
{
return
MAKE_SDEI_VERSION
(
MAJOR_VERSION
,
MINOR_VERSION
,
VENDOR_VERSION
);
}
...
...
@@ -263,17 +262,18 @@ static int validate_flags(uint64_t flags, uint64_t mpidr)
/* Set routing of an SDEI event */
static
int
sdei_event_routing_set
(
int
ev_num
,
uint64_t
flags
,
uint64_t
mpidr
)
{
int
ret
,
routing
;
int
ret
;
unsigned
int
routing
;
sdei_ev_map_t
*
map
;
sdei_entry_t
*
se
;
ret
=
validate_flags
(
flags
,
mpidr
);
if
(
ret
)
if
(
ret
!=
0
)
return
ret
;
/* Check if valid event number */
map
=
find_event_map
(
ev_num
);
if
(
!
map
)
if
(
map
==
NULL
)
return
SDEI_EINVAL
;
/* The event must not be private */
...
...
@@ -295,11 +295,11 @@ static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
}
/* Choose appropriate routing */
routing
=
(
flags
==
SDEI_REGF_RM_ANY
)
?
INTR_ROUTING_MODE_ANY
:
INTR_ROUTING_MODE_PE
;
routing
=
(
unsigned
int
)
(
(
flags
==
SDEI_REGF_RM_ANY
)
?
INTR_ROUTING_MODE_ANY
:
INTR_ROUTING_MODE_PE
)
;
/* Update event registration flag */
se
->
reg_flags
=
flags
;
se
->
reg_flags
=
(
unsigned
int
)
flags
;
/*
* ROUTING_SET is permissible only when event composite state is
...
...
@@ -315,24 +315,27 @@ finish:
}
/* Register handler and argument for an SDEI event */
static
int
sdei_event_register
(
int
ev_num
,
uint64_t
ep
,
uint64_t
arg
,
static
int
64_t
sdei_event_register
(
int
ev_num
,
uint64_t
ep
,
uint64_t
arg
,
uint64_t
flags
,
uint64_t
mpidr
)
{
int
ret
;
unsigned
int
routing
;
sdei_entry_t
*
se
;
sdei_ev_map_t
*
map
;
sdei_state_t
backup_state
;
if
(
!
ep
||
(
plat_sdei_validate_entry_point
(
ep
,
sdei_client_el
())
!=
0
))
if
((
ep
==
0U
)
||
(
plat_sdei_validate_entry_point
(
ep
,
sdei_client_el
())
!=
0
))
{
return
SDEI_EINVAL
;
}
ret
=
validate_flags
(
flags
,
mpidr
);
if
(
ret
)
if
(
ret
!=
0
)
return
ret
;
/* Check if valid event number */
map
=
find_event_map
(
ev_num
);
if
(
!
map
)
if
(
map
==
NULL
)
return
SDEI_EINVAL
;
/* Private events always target the PE */
...
...
@@ -371,7 +374,7 @@ static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
if
(
is_map_bound
(
map
))
{
/* Meanwhile, did any PE ACK the interrupt? */
if
(
plat_ic_get_interrupt_active
(
map
->
intr
))
if
(
plat_ic_get_interrupt_active
(
map
->
intr
)
!=
0U
)
goto
fallback
;
/* The interrupt must currently owned by Non-secure */
...
...
@@ -404,16 +407,15 @@ static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
* already ensure that shared events get bound to SPIs.
*/
if
(
is_event_shared
(
map
))
{
plat_ic_set_spi_routing
(
map
->
intr
,
((
flags
==
SDEI_REGF_RM_ANY
)
?
INTR_ROUTING_MODE_ANY
:
INTR_ROUTING_MODE_PE
),
routing
=
(
unsigned
int
)
((
flags
==
SDEI_REGF_RM_ANY
)
?
INTR_ROUTING_MODE_ANY
:
INTR_ROUTING_MODE_PE
);
plat_ic_set_spi_routing
(
map
->
intr
,
routing
,
(
u_register_t
)
mpidr
);
}
}
/* Populate event entries */
set_sdei_entry
(
se
,
ep
,
arg
,
flags
,
mpidr
);
set_sdei_entry
(
se
,
ep
,
arg
,
(
unsigned
int
)
flags
,
mpidr
);
/* Increment register count */
map
->
reg_count
++
;
...
...
@@ -432,15 +434,16 @@ fallback:
}
/* Enable SDEI event */
static
int
sdei_event_enable
(
int
ev_num
)
static
int
64_t
sdei_event_enable
(
int
ev_num
)
{
sdei_ev_map_t
*
map
;
sdei_entry_t
*
se
;
int
ret
,
before
,
after
;
int
ret
;
bool
before
,
after
;
/* Check if valid event number */
map
=
find_event_map
(
ev_num
);
if
(
!
map
)
if
(
map
==
NULL
)
return
SDEI_EINVAL
;
se
=
get_event_entry
(
map
);
...
...
@@ -475,11 +478,12 @@ static int sdei_event_disable(int ev_num)
{
sdei_ev_map_t
*
map
;
sdei_entry_t
*
se
;
int
ret
,
before
,
after
;
int
ret
;
bool
before
,
after
;
/* Check if valid event number */
map
=
find_event_map
(
ev_num
);
if
(
!
map
)
if
(
map
==
NULL
)
return
SDEI_EINVAL
;
se
=
get_event_entry
(
map
);
...
...
@@ -510,17 +514,18 @@ finish:
}
/* Query SDEI event information */
static uint64_t sdei_event_get_info(int ev_num, int info)
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	unsigned int flags, registered;
	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
...
...
@@ -576,7 +581,7 @@ static int sdei_event_unregister(int ev_num)
/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
...
...
@@ -648,7 +653,7 @@ static int sdei_event_status(int ev_num)
/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
...
...
@@ -662,27 +667,27 @@ static int sdei_event_status(int ev_num)
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return state;
	return (int) state;
}
/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(int intr_num)
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	int retry = 1, shared_mapping;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num))
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = plat_ic_is_spi(intr_num);
	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map) {
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
/*
...
...
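Editor's note: `retry` and `shared_mapping` switch from `int` flags to `bool` (hence the `<stdbool.h>` include added elsewhere in this series). A small sketch of a do/while loop driven by a `bool` flag in the same style; the function and array below are invented for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Return the index of the first free (zero) slot, or `count` if none. */
static unsigned int find_free_slot(const uint32_t *slots, unsigned int count)
{
	bool retry = true;
	unsigned int i = 0U;

	do {
		if ((i >= count) || (slots[i] == 0U))
			retry = false;	/* found a free slot, or ran out */
		else
			i++;
	} while (retry);

	return i;
}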
@@ -703,7 +708,7 @@ static int sdei_interrupt_bind(int intr_num)
* SDEI_DYN_IRQ.
*/
	map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
	if (!map)
	if (map == NULL)
		return SDEI_ENOMEM;
/* The returned mapping must be dynamic */
...
...
@@ -727,7 +732,7 @@ static int sdei_interrupt_bind(int intr_num)
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = 0;
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);
...
...
@@ -744,7 +749,7 @@ static int sdei_interrupt_release(int ev_num)
/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
...
...
@@ -774,7 +779,7 @@ static int sdei_interrupt_release(int ev_num)
* Deny release if the interrupt is active, which means it's
* probably being acknowledged and handled elsewhere.
*/
	if (plat_ic_get_interrupt_active(map->intr)) {
	if (plat_ic_get_interrupt_active(map->intr) != 0U) {
		ret = SDEI_EDENY;
		goto finish;
	}
...
...
@@ -802,7 +807,8 @@ finish:
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
...
...
@@ -824,7 +830,8 @@ static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i, j;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
...
...
@@ -867,17 +874,17 @@ static int sdei_shared_reset(void)
}
/* Send a signal to another SDEI client PE */
int sdei_signal(int event, uint64_t target_pe)
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (event != SDEI_EVENT_0)
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (!map)
	if (map == NULL)
		return SDEI_EINVAL;
/* The event must be signalable */
...
...
@@ -889,20 +896,20 @@ int sdei_signal(int event, uint64_t target_pe)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
uint64_t sdei_features(unsigned int feature)
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return SDEI_EINVAL;
	return (uint64_t) SDEI_EINVAL;
}
/* SDEI top level handler for servicing SMCs */
...
...
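Editor's note: `sdei_features()` returns `uint64_t`, so returning the negative `SDEI_EINVAL` now carries an explicit cast rather than an implicit signed-to-unsigned conversion. A standalone sketch of how such a negative code round-trips through an unsigned return value; the names and values here are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define MY_EINVAL (-2)	/* illustrative error code */

static uint64_t query_feature(unsigned int feature)
{
	if (feature == 0U)
		return 0x10001U;	/* some capability word */

	/* Explicit cast: -2 becomes 0xFFFFFFFFFFFFFFFE in the return value. */
	return (uint64_t) MY_EINVAL;
}

int main(void)
{
	uint64_t ret = query_feature(5U);

	/* Callers convert back to signed to test for an error. */
	if ((int64_t) ret < 0)
		printf("error %lld\n", (long long) (int64_t) ret);

	return 0;
}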
@@ -917,56 +924,59 @@ uint64_t sdei_smc_handler(uint32_t smc_fid,
{
	uint64_t x5;
	int ss = get_interrupt_src_ss(flags);
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	unsigned int resume = 0;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(handle, SMC_UNK);
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(handle, SMC_UNK);
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = sdei_version();
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%llx\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", (int) x1,
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(x1, x2, x3, x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(x1);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
		ret = sdei_event_disable(x1);
		SDEI_LOG("> DISABLE(n:%d)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(handle, x1);
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = 1;
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%d sta/ep:%llx):%lx\n", resume, x1,
				read_mpidr_el1());
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%llx):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%llx\n", ret);
...
...
@@ -977,82 +987,82 @@ uint64_t sdei_smc_handler(uint32_t smc_fid,
* shouldn't be modified. We don't return to the caller in this
* case anyway.
*/
		if (ret)
			SMC_RET1(handle, ret);
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(handle);
		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
		ret = sdei_event_status(x1);
		SDEI_LOG("> STAT(n:%d)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
		ret = sdei_event_get_info(x1, x2);
		SDEI_LOG("> INFO(n:%d, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
		ret = sdei_event_unregister(x1);
		SDEI_LOG("> UNREG(n:%d)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(handle, 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind(x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", (int) x1);
		ret = sdei_interrupt_release(x1);
		SDEI_LOG("> REL(%d)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", (int) x1, x2, x3);
		ret = sdei_event_routing_set(x1, x2, x3);
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%llx)\n", x1);
		ret = sdei_features(x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%llx\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%llx t:%llx)\n", x1, x2);
		ret = sdei_signal(x1, x2);
		SDEI_LOG("> SIGNAL(e:%d t:%llx)\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%lld\n", ret);
		SMC_RET1(handle, ret);
		SMC_RET1(ctx, ret);
default:
/* Do nothing in default case */
...
...
@@ -1060,7 +1070,7 @@ uint64_t sdei_smc_handler(uint32_t smc_fid,
}
	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
	SMC_RET1(ctx, SMC_UNK);
}
/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
...
...
services/std_svc/sdei/sdei_private.h
View file @
11dfe0b4
...
...
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SDEI_PRIVATE_H__
#define __SDEI_PRIVATE_H__
#ifndef SDEI_PRIVATE_H
#define SDEI_PRIVATE_H
#include <arch_helpers.h>
#include <context_mgmt.h>
...
...
@@ -36,12 +36,12 @@
#define SDEI_LOG(...) VERBOSE("SDEI: " __VA_ARGS__)
/* SDEI handler unregistered state. This is the default state. */
#define SDEI_STATE_UNREGISTERED 0
#define SDEI_STATE_UNREGISTERED 0U

/* SDE event status values in bit position */
#define SDEI_STATF_REGISTERED 0
#define SDEI_STATF_ENABLED 1
#define SDEI_STATF_RUNNING 2
#define SDEI_STATF_REGISTERED 0U
#define SDEI_STATF_ENABLED 1U
#define SDEI_STATF_RUNNING 2U
/* SDEI SMC error codes */
#define SDEI_EINVAL (-2)
...
...
@@ -62,18 +62,18 @@
#define SDEI_INFO_EV_ROUTING_MODE 3
#define SDEI_INFO_EV_ROUTING_AFF 4
#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_PRIV])
#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_SHRD])
#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_PRIV_])
#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_SHRD_])

#define for_each_mapping_type(_i, _mapping) \
	for (_i = 0, _mapping = &sdei_global_mappings[_i]; \
			_i < _SDEI_MAP_IDX_MAX; \
			_i++, _mapping = &sdei_global_mappings[_i])
	for ((_i) = 0, (_mapping) = &sdei_global_mappings[(_i)]; \
			(_i) < SDEI_MAP_IDX_MAX_; \
			(_i)++, (_mapping) = &sdei_global_mappings[(_i)])

#define iterate_mapping(_mapping, _i, _map) \
	for (_map = (_mapping)->map, _i = 0; \
			_i < (_mapping)->num_maps; \
			_i++, _map++)
	for ((_map) = (_mapping)->map, (_i) = 0; \
			(_i) < (_mapping)->num_maps; \
			(_i)++, (_map)++)
#define for_each_private_map(_i, _map) \
iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)
...
...
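Editor's note: the rewritten for_each_mapping_type and iterate_mapping bodies parenthesise every use of their parameters, in line with MISRA C:2012 Rule 20.7. A toy array-iteration macro showing the same rule; everything here is invented for illustration, not part of the TF-A headers:

#include <stdio.h>

#define ARRAY_LEN(_a)	(sizeof(_a) / sizeof((_a)[0]))

/* Each parameter is wrapped in parentheses at every expansion site, so an
 * argument such as `base + off` cannot be re-associated inside the macro. */
#define for_each_elem(_i, _p, _a) \
	for ((_i) = 0U, (_p) = &(_a)[0]; \
			(_i) < ARRAY_LEN(_a); \
			(_i)++, (_p)++)

int main(void)
{
	int vals[4] = { 1, 2, 3, 4 };
	unsigned int i;
	int *p;

	for_each_elem(i, p, vals)
		printf("%u: %d\n", i, *p);

	return 0;
}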
@@ -82,45 +82,45 @@
iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)
/* SDEI_FEATURES */
#define SDEI_FEATURE_BIND_SLOTS 0
#define BIND_SLOTS_MASK 0xffff
#define FEATURES_SHARED_SLOTS_SHIFT 16
#define FEATURES_PRIVATE_SLOTS_SHIFT 0
#define SDEI_FEATURE_BIND_SLOTS 0U
#define BIND_SLOTS_MASK 0xffffU
#define FEATURES_SHARED_SLOTS_SHIFT 16U
#define FEATURES_PRIVATE_SLOTS_SHIFT 0U

#define FEATURE_BIND_SLOTS(_priv, _shrd) \
	((((_priv) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
		(((_shrd) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
	(((((uint64_t) (_priv)) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
		((((uint64_t) (_shrd)) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
#define GET_EV_STATE(_e, _s) get_ev_state_bit(_e, SDEI_STATF_##_s)
#define SET_EV_STATE(_e, _s) clr_ev_state_bit(_e->state, SDEI_STATF_##_s)
static inline int is_event_private(sdei_ev_map_t *map)
static inline bool is_event_private(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_PRIVATE_SHIFT)) != 0);
	return ((map->map_flags & BIT_32(SDEI_MAPF_PRIVATE_SHIFT_)) != 0U);
}

static inline int is_event_shared(sdei_ev_map_t *map)
static inline bool is_event_shared(sdei_ev_map_t *map)
{
	return !is_event_private(map);
}

static inline int is_event_critical(sdei_ev_map_t *map)
static inline bool is_event_critical(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_CRITICAL_SHIFT)) != 0);
	return ((map->map_flags & BIT_32(SDEI_MAPF_CRITICAL_SHIFT_)) != 0U);
}

static inline int is_event_normal(sdei_ev_map_t *map)
static inline bool is_event_normal(sdei_ev_map_t *map)
{
	return !is_event_critical(map);
}

static inline int is_event_signalable(sdei_ev_map_t *map)
static inline bool is_event_signalable(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)) != 0);
	return ((map->map_flags & BIT_32(SDEI_MAPF_SIGNALABLE_SHIFT_)) != 0U);
}

static inline int is_map_dynamic(sdei_ev_map_t *map)
static inline bool is_map_dynamic(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_DYNAMIC_SHIFT)) != 0);
	return ((map->map_flags & BIT_32(SDEI_MAPF_DYNAMIC_SHIFT_)) != 0U);
}
/*
...
...
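Editor's note: the inline predicates above now return bool, use the width-explicit BIT_32() rather than BIT(), and compare the masked result against 0U. A self-contained sketch of the pattern; MY_BIT_32 and the flag shifts are local stand-ins, since the TF-A utility headers are not reproduced here:

#include <stdbool.h>
#include <stdint.h>

#define MY_BIT_32(n)		(((uint32_t) 1U) << (n))	/* stand-in for BIT_32() */
#define FLAG_PRIVATE_SHIFT	0U
#define FLAG_BOUND_SHIFT	3U

struct ev_map {
	uint32_t map_flags;
};

/* Predicates return bool and test the bit explicitly against 0U. */
static inline bool map_is_private(const struct ev_map *m)
{
	return ((m->map_flags & MY_BIT_32(FLAG_PRIVATE_SHIFT)) != 0U);
}

static inline bool map_is_bound(const struct ev_map *m)
{
	return ((m->map_flags & MY_BIT_32(FLAG_BOUND_SHIFT)) != 0U);
}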
@@ -129,29 +129,29 @@ static inline int is_map_dynamic(sdei_ev_map_t *map)
* called on them. This can be used on both static or dynamic events to check
* for an associated interrupt.
*/
static inline int is_map_bound(sdei_ev_map_t *map)
static inline bool is_map_bound(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_BOUND_SHIFT)) != 0);
	return ((map->map_flags & BIT_32(SDEI_MAPF_BOUND_SHIFT_)) != 0U);
}

static inline void set_map_bound(sdei_ev_map_t *map)
{
	map->map_flags |= BIT(_SDEI_MAPF_BOUND_SHIFT);
	map->map_flags |= BIT_32(SDEI_MAPF_BOUND_SHIFT_);
}

static inline int is_map_explicit(sdei_ev_map_t *map)
static inline bool is_map_explicit(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_EXPLICIT_SHIFT)) != 0);
	return ((map->map_flags & BIT_32(SDEI_MAPF_EXPLICIT_SHIFT_)) != 0U);
}

static inline void clr_map_bound(sdei_ev_map_t *map)
{
	map->map_flags &= ~(BIT(_SDEI_MAPF_BOUND_SHIFT));
	map->map_flags &= ~BIT_32(SDEI_MAPF_BOUND_SHIFT_);
}

static inline int is_secure_sgi(unsigned int intr)
static inline bool is_secure_sgi(unsigned int intr)
{
	return (plat_ic_is_sgi(intr) &&
	return ((plat_ic_is_sgi(intr) != 0) &&
			(plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
}
...
...
@@ -164,24 +164,24 @@ static inline unsigned int sdei_client_el(void)
	cpu_context_t *ns_ctx = cm_get_context(NON_SECURE);
	el3_state_t *el3_ctx = get_el3state_ctx(ns_ctx);

	return read_ctx_reg(el3_ctx, CTX_SCR_EL3) & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
	return ((read_ctx_reg(el3_ctx, CTX_SCR_EL3) & SCR_HCE_BIT) != 0U) ?
		MODE_EL2 : MODE_EL1;
}

static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI : PLAT_SDEI_NORMAL_PRI;
	return (unsigned int) (is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
			PLAT_SDEI_NORMAL_PRI);
}

static inline int get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
static inline bool get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
{
	return ((se->state & BIT(bit_no)) != 0);
	return ((se->state & BIT_32(bit_no)) != 0U);
}

static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
{
	se->state &= ~BIT(bit_no);
	se->state &= ~BIT_32(bit_no);
}
/* SDEI actions for state transition */
...
...
@@ -228,19 +228,19 @@ extern sdei_entry_t sdei_shared_event_table[];
void init_sdei_state(void);

sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared);
sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared);
sdei_ev_map_t *find_event_map(int ev_num);
sdei_entry_t *get_event_entry(sdei_ev_map_t *map);

int sdei_event_context(void *handle, unsigned int param);
int sdei_event_complete(int resume, uint64_t arg);
int64_t sdei_event_context(void *handle, unsigned int param);
int sdei_event_complete(bool resume, uint64_t pc);

void sdei_pe_unmask(void);
unsigned int sdei_pe_mask(void);
int64_t sdei_pe_mask(void);

int sdei_intr_handler(uint32_t intr, uint32_t flags, void *handle,
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie);
bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);
void begin_sdei_synchronous_dispatch(struct jmpbuf *buffer);

#endif /* __SDEI_PRIVATE_H__ */
#endif /* SDEI_PRIVATE_H */
services/std_svc/sdei/sdei_state.c
View file @
11dfe0b4
...
...
@@ -10,13 +10,13 @@
#include "sdei_private.h"
/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
#define r_ 0
#define r_ 0U
#define R_ (1u << SDEI_STATF_RUNNING)
#define e_ 0
#define e_ 0U
#define E_ (1u << SDEI_STATF_ENABLED)
#define g_ 0
#define g_ 0U
#define G_ (1u << SDEI_STATF_REGISTERED)
/* All possible composite handler states */
...
...
@@ -29,7 +29,7 @@
#define REg_ (R_ | E_ | g_)
#define REG_ (R_ | E_ | G_)
#define MAX_STATES (REG_ + 1)
#define MAX_STATES (REG_ + 1u)
/* Invalid state */
#define SDEI_STATE_INVALID ((sdei_state_t) (-1))
...
...
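Editor's note: the remaining sdei_state.c changes simply add U/u suffixes so the state-alias arithmetic stays unsigned throughout. A brief sketch of the idea with invented aliases, not the TF-A definitions themselves:

#include <stdint.h>

#define RUN_	(1u << 2)	/* 'R'unning */
#define ENA_	(1u << 1)	/* 'E'nabled */
#define REG_	(1u << 0)	/* re'G'istered */

/* Suffixed literals keep the whole expression unsigned, so the table size
 * never mixes signed and unsigned operands. */
#define MAX_STATES	((RUN_ | ENA_ | REG_) + 1u)

static uint8_t state_is_valid[MAX_STATES];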