Commit 2eedba9a (unverified)
Authored Oct 30, 2018 by Antonio Niño Díaz; committed by GitHub on Oct 30, 2018

Merge pull request #1651 from antonio-nino-diaz-arm/an/rand-misra

Fix some MISRA defects

Parents: 392b1d59, 195e363f
Changes: 30 files
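Most of the hunks below apply the same handful of MISRA-driven patterns: flags such as `el2_unused` become C99 `bool`, boolean-returning helpers are tested directly, essentially-integer expressions are compared explicitly against `0`/`0U` or `NULL`, and literals compared with unsigned values gain a `U` suffix. A minimal standalone C sketch of the flag conversion follows; the `feature_enable` function and its body are invented for illustration and are not part of the commit.

#include <stdbool.h>
#include <stdio.h>

/* After the change: the flag is a real boolean and is tested directly. */
static void feature_enable(bool el2_unused)
{
        if (el2_unused) {
                /* An EL2 trap-disable register write would go here. */
                printf("EL2 implemented but unused: disabling traps\n");
        }
}

int main(void)
{
        /* Previously: int el2_unused = 0; ... el2_unused = 1; */
        bool el2_unused = false;

        el2_unused = true;
        feature_enable(el2_unused);
        return 0;
}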
lib/el3_runtime/aarch32/context_mgmt.c

@@ -14,6 +14,7 @@
 #include <platform.h>
 #include <platform_def.h>
 #include <smccc_helpers.h>
+#include <stdbool.h>
 #include <string.h>
 #include <utils.h>

@@ -129,7 +130,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
  * it is zero.
  ******************************************************************************/
-static void enable_extensions_nonsecure(int el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL32
 #if ENABLE_AMU

@@ -175,7 +176,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
         uint32_t hsctlr, scr;
         cpu_context_t *ctx = cm_get_context(security_state);
-        int el2_unused = 0;
+        bool el2_unused = false;

         assert(ctx);

@@ -200,7 +201,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
                         isb();
                 } else if (read_id_pfr1() &
                         (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
-                        el2_unused = 1;
+                        el2_unused = true;

                         /*
                          * Set the NS bit to access NS copies of certain banked
lib/el3_runtime/aarch64/context_mgmt.c

@@ -18,6 +18,7 @@
 #include <pubsub_events.h>
 #include <smccc_helpers.h>
 #include <spe.h>
+#include <stdbool.h>
 #include <string.h>
 #include <sve.h>
 #include <utils.h>

@@ -231,7 +232,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
  * it is zero.
  ******************************************************************************/
-static void enable_extensions_nonsecure(int el2_unused)
+static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL31
 #if ENABLE_SPE_FOR_LOWER_ELS

@@ -289,7 +290,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
         uint32_t sctlr_elx, scr_el3, mdcr_el2;
         cpu_context_t *ctx = cm_get_context(security_state);
-        int el2_unused = 0;
+        bool el2_unused = false;
         uint64_t hcr_el2 = 0;

         assert(ctx);

@@ -304,7 +305,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
                         sctlr_elx |= SCTLR_EL2_RES1;
                         write_sctlr_el2(sctlr_elx);
                 } else if (EL_IMPLEMENTED(2)) {
-                        el2_unused = 1;
+                        el2_unused = true;

                         /*
                          * EL2 present but unused, need to disable safely.
lib/extensions/amu/aarch32/amu.c

@@ -10,6 +10,7 @@
 #include <arch_helpers.h>
 #include <platform.h>
 #include <pubsub_events.h>
+#include <stdbool.h>

 #define AMU_GROUP0_NR_COUNTERS	4

@@ -20,17 +21,17 @@ struct amu_ctx {
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

-int amu_supported(void)
+bool amu_supported(void)
 {
         uint64_t features;

         features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-        return (features & ID_PFR0_AMU_MASK) == 1;
+        return (features & ID_PFR0_AMU_MASK) == 1U;
 }

-void amu_enable(int el2_unused)
+void amu_enable(bool el2_unused)
 {
-        if (amu_supported() == 0)
+        if (!amu_supported())
                 return;

         if (el2_unused) {

@@ -54,8 +55,8 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

         return amu_group0_cnt_read_internal(idx);
 }

@@ -63,8 +64,8 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

         amu_group0_cnt_write_internal(idx, val);
         isb();

@@ -73,8 +74,8 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

         return amu_group1_cnt_read_internal(idx);
 }

@@ -82,8 +83,8 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

         amu_group1_cnt_write_internal(idx, val);
         isb();

@@ -91,8 +92,8 @@ void amu_group1_cnt_write(int idx, uint64_t val)
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

         amu_group1_set_evtype_internal(idx, val);
         isb();

@@ -103,7 +104,7 @@ static void *amu_context_save(const void *arg)
         struct amu_ctx *ctx;
         int i;

-        if (amu_supported() == 0)
+        if (!amu_supported())
                 return (void *)-1;

         ctx = &amu_ctxs[plat_my_core_pos()];

@@ -126,7 +127,7 @@ static void *amu_context_save(const void *arg)
         for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
                 ctx->group1_cnts[i] = amu_group1_cnt_read(i);

-        return 0;
+        return (void *)0;
 }

 static void *amu_context_restore(const void *arg)

@@ -134,13 +135,13 @@ static void *amu_context_restore(const void *arg)
         struct amu_ctx *ctx;
         int i;

-        if (amu_supported() == 0)
+        if (!amu_supported())
                 return (void *)-1;

         ctx = &amu_ctxs[plat_my_core_pos()];

         /* Counters were disabled in `amu_context_save()` */
-        assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);
+        assert((read_amcntenset0() == 0U) && (read_amcntenset1() == 0U));

         /* Restore group 0 counters */
         for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)

@@ -153,7 +154,7 @@ static void *amu_context_restore(const void *arg)
         /* Enable group 1 counters */
         write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);

-        return 0;
+        return (void *)0;
 }

 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
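The assert rewrites above recur throughout both AMU drivers: the now-boolean `amu_supported()` is asserted directly instead of being compared with `0`, compound range checks are fully parenthesised, and the feature-detection comparison uses an unsigned `1U`. A compilable sketch of that pattern, with a hypothetical `NR_COUNTERS` bound and a faked ID-register read standing in for the real helpers:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NR_COUNTERS 4 /* Hypothetical bound, mirrors AMU_GROUP0_NR_COUNTERS. */

static uint64_t fake_id_register(void) { return 1U; } /* Stand-in for read_id_pfr0(). */

static bool counters_supported(void)
{
        uint64_t features = fake_id_register();

        return (features & 0xfU) == 1U; /* Unsigned literal in the unsigned comparison. */
}

static uint64_t counter_read(int idx)
{
        assert(counters_supported());              /* Was: assert(... != 0); */
        assert((idx >= 0) && (idx < NR_COUNTERS)); /* Parenthesised sub-expressions. */

        return (uint64_t)idx; /* Placeholder for the real counter access. */
}

int main(void)
{
        return (int)counter_read(0);
}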
lib/extensions/amu/aarch64/amu.c

@@ -11,6 +11,7 @@
 #include <assert.h>
 #include <platform.h>
 #include <pubsub_events.h>
+#include <stdbool.h>

 #define AMU_GROUP0_NR_COUNTERS	4

@@ -21,23 +22,23 @@ struct amu_ctx {
 static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

-int amu_supported(void)
+bool amu_supported(void)
 {
         uint64_t features;

         features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
-        return (features & ID_AA64PFR0_AMU_MASK) == 1;
+        return (features & ID_AA64PFR0_AMU_MASK) == 1U;
 }

 /*
  * Enable counters. This function is meant to be invoked
  * by the context management library before exiting from EL3.
  */
-void amu_enable(int el2_unused)
+void amu_enable(bool el2_unused)
 {
         uint64_t v;

-        if (amu_supported() == 0)
+        if (!amu_supported())
                 return;

         if (el2_unused) {

@@ -67,8 +68,8 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

         return amu_group0_cnt_read_internal(idx);
 }

@@ -76,8 +77,8 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

         amu_group0_cnt_write_internal(idx, val);
         isb();

@@ -86,8 +87,8 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

         return amu_group1_cnt_read_internal(idx);
 }

@@ -95,8 +96,8 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

         amu_group1_cnt_write_internal(idx, val);
         isb();

@@ -108,8 +109,8 @@ void amu_group1_cnt_write(int idx, uint64_t val)
  */
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-        assert(amu_supported() != 0);
-        assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+        assert(amu_supported());
+        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

         amu_group1_set_evtype_internal(idx, val);
         isb();

@@ -120,14 +121,14 @@ static void *amu_context_save(const void *arg)
         struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
         int i;

-        if (amu_supported() == 0)
+        if (!amu_supported())
                 return (void *)-1;

         /* Assert that group 0/1 counter configuration is what we expect */
-        assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
-               read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+        assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
+               (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));

-        assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+        assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
                 <= AMU_GROUP1_NR_COUNTERS);

         /*

@@ -146,7 +147,7 @@ static void *amu_context_save(const void *arg)
         for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
                 ctx->group1_cnts[i] = amu_group1_cnt_read(i);

-        return 0;
+        return (void *)0;
 }

 static void *amu_context_restore(const void *arg)

@@ -154,30 +155,30 @@ static void *amu_context_restore(const void *arg)
         struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
         int i;

-        if (amu_supported() == 0)
+        if (!amu_supported())
                 return (void *)-1;

         /* Counters were disabled in `amu_context_save()` */
-        assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
+        assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));

-        assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+        assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
                 <= AMU_GROUP1_NR_COUNTERS);

         /* Restore group 0 counters */
         for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
-                if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+                if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
                         amu_group0_cnt_write(i, ctx->group0_cnts[i]);

         /* Restore group 1 counters */
         for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
-                if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
                         amu_group1_cnt_write(i, ctx->group1_cnts[i]);

         /* Restore group 0/1 counter configuration */
         write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
         write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);

-        return 0;
+        return (void *)0;
 }

 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
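Two further patterns show up in the AArch64 AMU hunks: the pubsub hooks return explicit pointer constants (`(void *)0`, `(void *)-1`) instead of bare integers, and bit tests on the counter masks compare against `0U` rather than relying on implicit conversion to boolean. A self-contained sketch of both; the hook, mask, and counter count are invented for illustration:

#include <stddef.h>
#include <stdio.h>

#define COUNTERS_MASK 0xfU /* Illustrative stand-in for AMU_GROUP0_COUNTERS_MASK. */
#define NR_COUNTERS   4U

static int supported = 1;

/* Pubsub-style hook: the framework expects a void * result. */
static void *context_save_hook(const void *arg)
{
        (void)arg;

        if (supported == 0)
                return (void *)-1; /* Explicit pointer constant rather than a bare -1. */

        for (unsigned int i = 0U; i < NR_COUNTERS; i++) {
                if ((COUNTERS_MASK & (1U << i)) != 0U) /* Explicit comparison with 0U. */
                        printf("saving counter %u\n", i);
        }

        return (void *)0; /* Explicit pointer zero rather than a bare 0. */
}

int main(void)
{
        return (context_save_hook(NULL) == (void *)0) ? 0 : 1;
}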
lib/extensions/mpam/mpam.c

@@ -16,7 +16,7 @@ bool mpam_supported(void)
         return ((features & ID_AA64PFR0_MPAM_MASK) != 0U);
 }

-void mpam_enable(int el2_unused)
+void mpam_enable(bool el2_unused)
 {
         if (!mpam_supported())
                 return;

@@ -31,7 +31,7 @@ void mpam_enable(int el2_unused)
          * If EL2 is implemented but unused, disable trapping to EL2 when lower
          * ELs access their own MPAM registers.
          */
-        if (el2_unused != 0) {
+        if (el2_unused) {
                 write_mpam2_el2(0);

                 if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U)
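Note that this change runs in the opposite direction to the others: once `el2_unused` is a genuine `bool` it is tested directly, while the `!= 0U` form is reserved for essentially-integer expressions such as the `MPAMIDR_HAS_HCR_BIT` mask test. A tiny sketch of that distinction, with an invented bit position and a faked register read:

#include <stdbool.h>
#include <stdint.h>

#define HAS_HCR_BIT (1ULL << 17) /* Illustrative bit position only. */

static uint64_t fake_read_idr(void) { return HAS_HCR_BIT; } /* Stand-in for read_mpamidr_el1(). */

void configure(bool el2_unused)
{
        if (el2_unused) {                                    /* Boolean: test directly. */
                if ((fake_read_idr() & HAS_HCR_BIT) != 0U) { /* Integer: compare explicitly. */
                        /* The corresponding EL2 register write would go here. */
                }
        }
}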
lib/extensions/spe/spe.c

@@ -8,26 +8,30 @@
 #include <arch_helpers.h>
 #include <pubsub.h>
 #include <spe.h>
+#include <stdbool.h>

-/*
- * The assembler does not yet understand the psb csync mnemonic
- * so use the equivalent hint instruction.
- */
-#define psb_csync()	asm volatile("hint #17")
+static inline void psb_csync(void)
+{
+        /*
+         * The assembler does not yet understand the psb csync mnemonic
+         * so use the equivalent hint instruction.
+         */
+        __asm__ volatile("hint #17");
+}

-int spe_supported(void)
+bool spe_supported(void)
 {
         uint64_t features;

         features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-        return (features & ID_AA64DFR0_PMS_MASK) == 1;
+        return (features & ID_AA64DFR0_PMS_MASK) == 1U;
 }

-void spe_enable(int el2_unused)
+void spe_enable(bool el2_unused)
 {
         uint64_t v;

-        if (spe_supported() == 0)
+        if (!spe_supported())
                 return;

         if (el2_unused) {

@@ -59,7 +63,7 @@ void spe_disable(void)
 {
         uint64_t v;

-        if (spe_supported() == 0)
+        if (!spe_supported())
                 return;

         /* Drain buffered data */

@@ -75,13 +79,14 @@ void spe_disable(void)
 static void *spe_drain_buffers_hook(const void *arg)
 {
-        if (spe_supported() == 0)
+        if (!spe_supported())
                 return (void *)-1;

         /* Drain buffered data */
         psb_csync();
         dsbnsh();

-        return 0;
+        return (void *)0;
 }

 SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
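Besides the boolean conversions, spe.c replaces the function-like `psb_csync()` macro with a `static inline` function wrapping the same `hint #17` inline assembly, keeping the call-site syntax while giving the compiler a typed function to check. A sketch of the same macro-to-inline move; it only assembles for AArch64, and the surrounding `drain_buffers()` caller is invented:

/* Before: #define psb_csync() asm volatile("hint #17") */

static inline void psb_csync(void)
{
        /*
         * The assembler does not yet understand the psb csync mnemonic
         * so use the equivalent hint instruction.
         */
        __asm__ volatile("hint #17");
}

void drain_buffers(void)
{
        psb_csync(); /* Call site is unchanged by the conversion. */
}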
lib/extensions/sve/sve.c

@@ -7,21 +7,22 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <pubsub.h>
+#include <stdbool.h>
 #include <sve.h>

-int sve_supported(void)
+bool sve_supported(void)
 {
         uint64_t features;

         features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-        return (features & ID_AA64PFR0_SVE_MASK) == 1;
+        return (features & ID_AA64PFR0_SVE_MASK) == 1U;
 }

 static void *disable_sve_hook(const void *arg)
 {
         uint64_t cptr;

-        if (sve_supported() == 0)
+        if (!sve_supported())
                 return (void *)-1;

         /*

@@ -39,14 +40,14 @@ static void *disable_sve_hook(const void *arg)
          * No explicit ISB required here as ERET to switch to Secure
          * world covers it
          */
-        return 0;
+        return (void *)0;
 }

 static void *enable_sve_hook(const void *arg)
 {
         uint64_t cptr;

-        if (sve_supported() == 0)
+        if (!sve_supported())
                 return (void *)-1;

         /*

@@ -60,14 +61,14 @@ static void *enable_sve_hook(const void *arg)
          * No explicit ISB required here as ERET to switch to Non-secure
          * world covers it
          */
-        return 0;
+        return (void *)0;
 }

-void sve_enable(int el2_unused)
+void sve_enable(bool el2_unused)
 {
         uint64_t cptr;

-        if (sve_supported() == 0)
+        if (!sve_supported())
                 return;

 #if CTX_INCLUDE_FPREGS
lib/pmf/pmf_main.c

@@ -26,7 +26,7 @@
 IMPORT_SYM(uintptr_t, __PMF_SVC_DESCS_START__, PMF_SVC_DESCS_START);
 IMPORT_SYM(uintptr_t, __PMF_SVC_DESCS_END__, PMF_SVC_DESCS_END);
 IMPORT_SYM(uintptr_t, __PMF_PERCPU_TIMESTAMP_END__, PMF_PERCPU_TIMESTAMP_END);
-IMPORT_SYM(intptr_t, __PMF_TIMESTAMP_START__, PMF_TIMESTAMP_ARRAY_START);
+IMPORT_SYM(uintptr_t, __PMF_TIMESTAMP_START__, PMF_TIMESTAMP_ARRAY_START);

 #define PMF_PERCPU_TIMESTAMP_SIZE	(PMF_PERCPU_TIMESTAMP_END - PMF_TIMESTAMP_ARRAY_START)

@@ -67,15 +67,15 @@ int pmf_setup(void)
         pmf_svc_descs = (pmf_svc_desc_t *) PMF_SVC_DESCS_START;
         for (ii = 0; ii < pmf_svc_descs_num; ii++) {

-                assert(pmf_svc_descs[ii].get_ts);
+                assert(pmf_svc_descs[ii].get_ts != NULL);

                 /*
                  * Call the initialization routine for this
                  * PMF service, if it is defined.
                  */
-                if (pmf_svc_descs[ii].init) {
+                if (pmf_svc_descs[ii].init != NULL) {
                         rc = pmf_svc_descs[ii].init();
-                        if (rc) {
+                        if (rc != 0) {
                                 WARN("Could not initialize PMF"
                                         "service %s - skipping \n",
                                         pmf_svc_descs[ii].name);

@@ -125,7 +125,7 @@ static pmf_svc_desc_t *get_service(unsigned int tid)
         if (pmf_num_services == 0)
                 return NULL;

-        assert(pmf_svc_descs);
+        assert(pmf_svc_descs != NULL);

         do {
                 mid = (low + high) / 2;

@@ -158,7 +158,7 @@ int pmf_get_timestamp_smc(unsigned int tid,
                 unsigned long long *ts_value)
 {
         pmf_svc_desc_t *svc_desc;
-        assert(ts_value);
+        assert(ts_value != NULL);

         /* Search for registered service. */
         svc_desc = get_service(tid);

@@ -247,7 +247,7 @@ unsigned long long __pmf_get_timestamp(uintptr_t base_addr,
         unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
                                 tid, cpuid);

-        if (flags & PMF_CACHE_MAINT)
+        if ((flags & PMF_CACHE_MAINT) != 0U)
                 inv_dcache_range((uintptr_t)ts_addr, sizeof(unsigned long long));

         return *ts_addr;
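pmf_main.c applies the pointer and integer forms of the same rule: pointers are compared against NULL and return codes against 0 instead of being used directly as conditions. A compilable sketch with an invented descriptor type standing in for `pmf_svc_desc_t`:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for pmf_svc_desc_t. */
typedef struct {
        const char *name;
        int (*init)(void);
} svc_desc_t;

static void init_services(const svc_desc_t *descs, size_t count)
{
        assert(descs != NULL);                  /* Was: assert(descs); */

        for (size_t i = 0; i < count; i++) {
                if (descs[i].init != NULL) {    /* Was: if (descs[i].init) */
                        int rc = descs[i].init();

                        if (rc != 0)            /* Was: if (rc) */
                                printf("init of %s failed\n", descs[i].name);
                }
        }
}

static int demo_init(void) { return 0; }

int main(void)
{
        svc_desc_t descs[] = { { "demo", demo_init } };

        init_services(descs, 1);
        return 0;
}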
lib/pmf/pmf_smc.c

@@ -37,7 +37,8 @@ uintptr_t pmf_smc_handler(unsigned int smc_fid,
                  * x0 --> error code.
                  * x1 - x2 --> time-stamp value.
                  */
-                rc = pmf_get_timestamp_smc(x1, x2, x3, &ts_value);
+                rc = pmf_get_timestamp_smc((unsigned int)x1, x2,
+                                (unsigned int)x3, &ts_value);
                 SMC_RET3(handle, rc, (uint32_t)ts_value,
                         (uint32_t)(ts_value >> 32));
         }

@@ -49,7 +50,8 @@ uintptr_t pmf_smc_handler(unsigned int smc_fid,
                  * x0 --> error code.
                  * x1 --> time-stamp value.
                  */
-                rc = pmf_get_timestamp_smc(x1, x2, x3, &ts_value);
+                rc = pmf_get_timestamp_smc((unsigned int)x1, x2,
+                                (unsigned int)x3, &ts_value);
                 SMC_RET2(handle, rc, ts_value);
         }
 }
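Here the 64-bit SMC register arguments are narrowed with explicit `(unsigned int)` casts where the callee's prototype takes a narrower type, making the conversion visible at the call site. A small sketch of the pattern; the handler and callee are invented, and only the casting style follows the diff:

#include <stdint.h>

typedef uint64_t u_register_t;

/* The callee takes narrower parameters than the raw SMC registers. */
static int get_timestamp(unsigned int tid, u_register_t mpidr,
                         unsigned int flags, unsigned long long *ts)
{
        *ts = (unsigned long long)tid + mpidr + flags;
        return 0;
}

static int smc_handler(u_register_t x1, u_register_t x2, u_register_t x3)
{
        unsigned long long ts_value;

        /* Was: get_timestamp(x1, x2, x3, &ts_value); with implicit narrowing. */
        return get_timestamp((unsigned int)x1, x2, (unsigned int)x3, &ts_value);
}

int main(void)
{
        return smc_handler(1U, 2U, 3U);
}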
plat/arm/common/arm_sip_svc.c

@@ -58,7 +58,7 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
                 /* Validate supplied entry point */
                 pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
-                if (arm_validate_ns_entrypoint(pc))
+                if (arm_validate_ns_entrypoint(pc) != 0)
                         SMC_RET1(handle, STATE_SW_E_PARAM);

                 /*