Arm Trusted Firmware / Commits

Commit f461da2a (unverified), authored Feb 27, 2018 by davidcunado-arm, committed by GitHub on Feb 27, 2018

Merge pull request #1272 from dp-arm/dp/extensions

Refactor SPE/SVE code and fix some bugs in AMUv1 on AArch32

Parents: 322a98b6, 700efdd1
Changes: 7 files
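Taken together, the seven files converge on one pattern: each extension exposes a small integer query (spe_supported() and sve_supported() are added here; amu_supported() already existed), and the enable/disable and context-management paths test its result explicitly instead of re-reading ID registers inline. A minimal sketch of that shape, with illustrative stand-in names rather than code from the patch:

#include <stdint.h>

/* Stand-ins for this sketch only; the real ID-register accessor and
 * field macros live in the TF-A arch headers. */
#define EXT_FIELD_SHIFT         32
#define EXT_FIELD_MASK          0xfULL
extern uint64_t read_id_register(void);        /* e.g. read_id_aa64dfr0_el1() */

int ext_supported(void)
{
        uint64_t features = read_id_register() >> EXT_FIELD_SHIFT;

        return (features & EXT_FIELD_MASK) == 1;
}

void ext_enable(int el2_unused)
{
        if (ext_supported() == 0)
                return;         /* extension absent: nothing to configure */

        /* ... program the EL3/EL2 trap controls here ... */
        (void)el2_unused;
}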
include/lib/extensions/spe.h

 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
...
@@ -7,6 +7,7 @@
 #ifndef __SPE_H__
 #define __SPE_H__

+int spe_supported(void);
 void spe_enable(int el2_unused);
 void spe_disable(void);
...
include/lib/extensions/sve.h

 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
...
@@ -7,6 +7,7 @@
 #ifndef __SVE_H__
 #define __SVE_H__

+int sve_supported(void);
 void sve_enable(int el2_unused);

 #endif /* __SVE_H__ */
lib/extensions/amu/aarch32/amu.c

...
@@ -30,7 +30,7 @@ int amu_supported(void)

 void amu_enable(int el2_unused)
 {
-        if (!amu_supported())
+        if (amu_supported() == 0)
                 return;

         if (el2_unused) {
...
@@ -54,7 +54,7 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

         return amu_group0_cnt_read_internal(idx);
...
@@ -63,7 +63,7 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

         amu_group0_cnt_write_internal(idx, val);
...
@@ -73,7 +73,7 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

         return amu_group1_cnt_read_internal(idx);
...
@@ -82,7 +82,7 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

         amu_group1_cnt_write_internal(idx, val);
...
@@ -91,7 +91,7 @@ void amu_group1_cnt_write(int idx, uint64_t val)
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

         amu_group1_set_evtype_internal(idx, val);
...
@@ -103,13 +103,14 @@ static void *amu_context_save(const void *arg)
         struct amu_ctx *ctx;
         int i;

-        if (!amu_supported())
+        if (amu_supported() == 0)
                 return (void *)-1;

         ctx = &amu_ctxs[plat_my_core_pos()];

         /* Assert that group 0 counter configuration is what we expect */
-        assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+        assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
+               read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);

         /*
          * Disable group 0 counters to avoid other observers like SCP sampling
...
@@ -131,17 +132,15 @@ static void *amu_context_save(const void *arg)
 static void *amu_context_restore(const void *arg)
 {
         struct amu_ctx *ctx;
-        uint64_t features;
         int i;

-        features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-        if ((features & ID_PFR0_AMU_MASK) != 1)
+        if (amu_supported() == 0)
                 return (void *)-1;

         ctx = &amu_ctxs[plat_my_core_pos()];

         /* Counters were disabled in `amu_context_save()` */
-        assert(read_amcntenset0() == 0);
+        assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);

         /* Restore group 0 counters */
         for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
...
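For context, a hedged usage sketch of the public counter API touched above: the assertions mean callers are expected to have confirmed amu_supported() and to stay inside the group's counter range. The API names are from the diff; the caller itself is hypothetical:

#include <stdint.h>
#include <amu.h>        /* amu_supported(), amu_group0_cnt_read(), ... */

/* Hypothetical helper: snapshot group 0 counter 0 if an AMU is present. */
static uint64_t snapshot_group0_counter0(void)
{
        if (amu_supported() == 0)
                return 0;       /* no AMU: report zero instead of tripping the asserts */

        /* idx 0 is inside [0, AMU_GROUP0_NR_COUNTERS), so the asserts hold. */
        return amu_group0_cnt_read(0);
}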
lib/extensions/amu/aarch32/amu_helpers.S

...
@@ -18,7 +18,7 @@
  * uint64_t amu_group0_cnt_read_internal(int idx);
  *
  * Given `idx`, read the corresponding AMU counter
- * and return it in `r0`.
+ * and return it in `r0` and `r1`.
  */
 func amu_group0_cnt_read_internal
 #if ENABLE_ASSERTIONS
...
@@ -52,13 +52,15 @@ endfunc amu_group0_cnt_read_internal
 /*
  * void amu_group0_cnt_write_internal(int idx, uint64_t val);
  *
  * Given `idx`, write `val` to the corresponding AMU counter.
+ * `idx` is passed in `r0` and `val` is passed in `r2` and `r3`.
+ * `r1` is used as a scratch register.
  */
 func amu_group0_cnt_write_internal
 #if ENABLE_ASSERTIONS
         /* `idx` should be between [0, 3] */
-        mov     r2, r0
-        lsr     r2, r2, #2
-        cmp     r2, #0
+        mov     r1, r0
+        lsr     r1, r1, #2
+        cmp     r1, #0
         ASM_ASSERT(eq)
 #endif
...
@@ -66,19 +68,19 @@ func amu_group0_cnt_write_internal
         /*
          * Given `idx` calculate address of stcopr16/bx lr instruction pair
          * in the table below.
          */
-        adr     r2, 1f
+        adr     r1, 1f
         lsl     r0, r0, #3      /* each stcopr16/bx lr sequence is 8 bytes */
-        add     r2, r2, r0
-        bx      r2
+        add     r1, r1, r0
+        bx      r1

 1:
-        stcopr16        r0, r1, AMEVCNTR00      /* index 0 */
+        stcopr16        r2, r3, AMEVCNTR00      /* index 0 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR01      /* index 1 */
+        stcopr16        r2, r3, AMEVCNTR01      /* index 1 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR02      /* index 2 */
+        stcopr16        r2, r3, AMEVCNTR02      /* index 2 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR03      /* index 3 */
+        stcopr16        r2, r3, AMEVCNTR03      /* index 3 */
         bx      lr
 endfunc amu_group0_cnt_write_internal
...
@@ -86,14 +88,14 @@ endfunc amu_group0_cnt_write_internal
 /*
  * uint64_t amu_group1_cnt_read_internal(int idx);
  *
  * Given `idx`, read the corresponding AMU counter
- * and return it in `r0`.
+ * and return it in `r0` and `r1`.
  */
 func amu_group1_cnt_read_internal
 #if ENABLE_ASSERTIONS
         /* `idx` should be between [0, 15] */
-        mov     r2, r0
-        lsr     r2, r2, #4
-        cmp     r2, #0
+        mov     r1, r0
+        lsr     r1, r1, #4
+        cmp     r1, #0
         ASM_ASSERT(eq)
 #endif
...
@@ -107,51 +109,53 @@ func amu_group1_cnt_read_internal
         bx      r1

 1:
         ldcopr16        r0, r1, AMEVCNTR10      /* index 0 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR11      /* index 1 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR12      /* index 2 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR13      /* index 3 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR14      /* index 4 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR15      /* index 5 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR16      /* index 6 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR17      /* index 7 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR18      /* index 8 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR19      /* index 9 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR1A      /* index 10 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR1B      /* index 11 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR1C      /* index 12 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR1D      /* index 13 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR1E      /* index 14 */
         bx      lr
         ldcopr16        r0, r1, AMEVCNTR1F      /* index 15 */
         bx      lr
 endfunc amu_group1_cnt_read_internal

 /*
  * void amu_group1_cnt_write_internal(int idx, uint64_t val);
  *
  * Given `idx`, write `val` to the corresponding AMU counter.
+ * `idx` is passed in `r0` and `val` is passed in `r2` and `r3`.
+ * `r1` is used as a scratch register.
  */
 func amu_group1_cnt_write_internal
 #if ENABLE_ASSERTIONS
         /* `idx` should be between [0, 15] */
-        mov     r2, r0
-        lsr     r2, r2, #4
-        cmp     r2, #0
+        mov     r1, r0
+        lsr     r1, r1, #4
+        cmp     r1, #0
         ASM_ASSERT(eq)
 #endif
...
@@ -159,43 +163,43 @@ func amu_group1_cnt_write_internal
         /*
          * Given `idx` calculate address of ldcopr16/bx lr instruction pair
          * in the table below.
          */
-        adr     r2, 1f
+        adr     r1, 1f
         lsl     r0, r0, #3      /* each stcopr16/bx lr sequence is 8 bytes */
-        add     r2, r2, r0
-        bx      r2
+        add     r1, r1, r0
+        bx      r1

 1:
-        stcopr16        r0, r1, AMEVCNTR10      /* index 0 */
+        stcopr16        r2, r3, AMEVCNTR10      /* index 0 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR11      /* index 1 */
+        stcopr16        r2, r3, AMEVCNTR11      /* index 1 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR12      /* index 2 */
+        stcopr16        r2, r3, AMEVCNTR12      /* index 2 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR13      /* index 3 */
+        stcopr16        r2, r3, AMEVCNTR13      /* index 3 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR14      /* index 4 */
+        stcopr16        r2, r3, AMEVCNTR14      /* index 4 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR15      /* index 5 */
+        stcopr16        r2, r3, AMEVCNTR15      /* index 5 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR16      /* index 6 */
+        stcopr16        r2, r3, AMEVCNTR16      /* index 6 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR17      /* index 7 */
+        stcopr16        r2, r3, AMEVCNTR17      /* index 7 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR18      /* index 8 */
+        stcopr16        r2, r3, AMEVCNTR18      /* index 8 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR19      /* index 9 */
+        stcopr16        r2, r3, AMEVCNTR19      /* index 9 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR1A      /* index 10 */
+        stcopr16        r2, r3, AMEVCNTR1A      /* index 10 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR1B      /* index 11 */
+        stcopr16        r2, r3, AMEVCNTR1B      /* index 11 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR1C      /* index 12 */
+        stcopr16        r2, r3, AMEVCNTR1C      /* index 12 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR1D      /* index 13 */
+        stcopr16        r2, r3, AMEVCNTR1D      /* index 13 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR1E      /* index 14 */
+        stcopr16        r2, r3, AMEVCNTR1E      /* index 14 */
         bx      lr
-        stcopr16        r0, r1, AMEVCNTR1F      /* index 15 */
+        stcopr16        r2, r3, AMEVCNTR1F      /* index 15 */
         bx      lr
 endfunc amu_group1_cnt_write_internal
...
@@ -230,36 +234,36 @@ func amu_group1_set_evtype_internal
         bx      r2

 1:
-        stcopr  r0, AMEVTYPER10 /* index 0 */
+        stcopr  r1, AMEVTYPER10 /* index 0 */
         bx      lr
-        stcopr  r0, AMEVTYPER11 /* index 1 */
+        stcopr  r1, AMEVTYPER11 /* index 1 */
         bx      lr
-        stcopr  r0, AMEVTYPER12 /* index 2 */
+        stcopr  r1, AMEVTYPER12 /* index 2 */
         bx      lr
-        stcopr  r0, AMEVTYPER13 /* index 3 */
+        stcopr  r1, AMEVTYPER13 /* index 3 */
         bx      lr
-        stcopr  r0, AMEVTYPER14 /* index 4 */
+        stcopr  r1, AMEVTYPER14 /* index 4 */
         bx      lr
-        stcopr  r0, AMEVTYPER15 /* index 5 */
+        stcopr  r1, AMEVTYPER15 /* index 5 */
         bx      lr
-        stcopr  r0, AMEVTYPER16 /* index 6 */
+        stcopr  r1, AMEVTYPER16 /* index 6 */
         bx      lr
-        stcopr  r0, AMEVTYPER17 /* index 7 */
+        stcopr  r1, AMEVTYPER17 /* index 7 */
         bx      lr
-        stcopr  r0, AMEVTYPER18 /* index 8 */
+        stcopr  r1, AMEVTYPER18 /* index 8 */
         bx      lr
-        stcopr  r0, AMEVTYPER19 /* index 9 */
+        stcopr  r1, AMEVTYPER19 /* index 9 */
         bx      lr
-        stcopr  r0, AMEVTYPER1A /* index 10 */
+        stcopr  r1, AMEVTYPER1A /* index 10 */
         bx      lr
-        stcopr  r0, AMEVTYPER1B /* index 11 */
+        stcopr  r1, AMEVTYPER1B /* index 11 */
         bx      lr
-        stcopr  r0, AMEVTYPER1C /* index 12 */
+        stcopr  r1, AMEVTYPER1C /* index 12 */
         bx      lr
-        stcopr  r0, AMEVTYPER1D /* index 13 */
+        stcopr  r1, AMEVTYPER1D /* index 13 */
         bx      lr
-        stcopr  r0, AMEVTYPER1E /* index 14 */
+        stcopr  r1, AMEVTYPER1E /* index 14 */
         bx      lr
-        stcopr  r0, AMEVTYPER1F /* index 15 */
+        stcopr  r1, AMEVTYPER1F /* index 15 */
         bx      lr
 endfunc amu_group1_set_evtype_internal
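Two things drive the register changes in this file. First, under the 32-bit procedure call standard the 64-bit `val` argument of the write helpers arrives in `r2`/`r3` (leaving `r1` free as a scratch register), which is exactly what the new comments spell out; the old code stored `r0`/`r1` and so wrote the wrong data. Second, the helpers dispatch by jumping into a table of fixed-size instruction pairs, each one stcopr16/ldcopr16 (or stcopr) plus one bx lr, i.e. 8 bytes, so the target is base + (idx << 3). A small C sketch of that address arithmetic, purely illustrative:

#include <assert.h>
#include <stdint.h>

#define ENTRY_SIZE      8u      /* one stcopr16 (or ldcopr16/stcopr) plus one bx lr */

/* Mirrors "lsl r0, r0, #3; add r1, r1, r0; bx r1" together with the
 * ENABLE_ASSERTIONS bounds check on idx. */
static uintptr_t table_entry_addr(uintptr_t table_base, int idx, int nr_entries)
{
        assert(idx >= 0 && idx < nr_entries);
        return table_base + ((uintptr_t)idx * ENTRY_SIZE);
}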
lib/extensions/amu/aarch64/amu.c

...
@@ -37,7 +37,7 @@ void amu_enable(int el2_unused)
 {
         uint64_t v;

-        if (!amu_supported())
+        if (amu_supported() == 0)
                 return;

         if (el2_unused) {
...
@@ -67,7 +67,7 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

         return amu_group0_cnt_read_internal(idx);
...
@@ -76,7 +76,7 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);

         amu_group0_cnt_write_internal(idx, val);
...
@@ -86,7 +86,7 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

         return amu_group1_cnt_read_internal(idx);
...
@@ -95,7 +95,7 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

         amu_group1_cnt_write_internal(idx, val);
...
@@ -108,7 +108,7 @@ void amu_group1_cnt_write(int idx, uint64_t val)
  */
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-        assert(amu_supported());
+        assert(amu_supported() != 0);
         assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);

         amu_group1_set_evtype_internal(idx, val);
...
@@ -120,7 +120,7 @@ static void *amu_context_save(const void *arg)
         struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
         int i;

-        if (!amu_supported())
+        if (amu_supported() == 0)
                 return (void *)-1;

         /* Assert that group 0/1 counter configuration is what we expect */
...
@@ -154,7 +154,7 @@ static void *amu_context_restore(const void *arg)
         struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
         int i;

-        if (!amu_supported())
+        if (amu_supported() == 0)
                 return (void *)-1;

         /* Counters were disabled in `amu_context_save()` */
...
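Both context-management hooks in this file now follow the same shape: bail out early with (void *)-1 when amu_supported() reports no AMU, otherwise do the work. A sketch of that shape with the body elided (the hook name is illustrative, not from the patch):

#include <amu.h>

static void *example_amu_hook(const void *arg)
{
        (void)arg;      /* event payload unused in this sketch */

        if (amu_supported() == 0)
                return (void *)-1;      /* AMU not implemented: nothing to do */

        /* ... save or restore the group 0/1 counters here ... */
        return 0;       /* success */
}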
lib/extensions/spe/spe.c

 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
...
@@ -14,71 +14,72 @@
  */
 #define psb_csync() asm volatile("hint #17")

-void spe_enable(int el2_unused)
+int spe_supported(void)
 {
         uint64_t features;

         features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-        if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
-                uint64_t v;
+        return (features & ID_AA64DFR0_PMS_MASK) == 1;
+}
+
+void spe_enable(int el2_unused)
+{
+        uint64_t v;
+
+        if (spe_supported() == 0)
+                return;

         if (el2_unused) {
                 /*
                  * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
                  * profiling controls to EL2.
                  *
                  * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
                  * state. Accesses to profiling buffer controls at
                  * Non-secure EL1 are not trapped to EL2.
                  */
                 v = read_mdcr_el2();
                 v &= ~MDCR_EL2_TPMS;
                 v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
                 write_mdcr_el2(v);
         }

         /*
          * MDCR_EL2.NSPB (ARM v8.2): SPE enabled in Non-secure state
          * and disabled in secure state. Accesses to SPE registers at
          * S-EL1 generate trap exceptions to EL3.
          */
         v = read_mdcr_el3();
         v |= MDCR_NSPB(MDCR_NSPB_EL1);
         write_mdcr_el3(v);
-        }
 }

 void spe_disable(void)
 {
-        uint64_t features;
+        uint64_t v;

-        features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-        if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
-                uint64_t v;
+        if (spe_supported() == 0)
+                return;

         /* Drain buffered data */
         psb_csync();
         dsbnsh();

         /* Disable profiling buffer */
         v = read_pmblimitr_el1();
         v &= ~(1ULL << 0);
         write_pmblimitr_el1(v);
         isb();
-        }
 }

 static void *spe_drain_buffers_hook(const void *arg)
 {
-        uint64_t features;
+        if (spe_supported() == 0)
+                return (void *)-1;

-        features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-        if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
-                /* Drain buffered data */
-                psb_csync();
-                dsbnsh();
-        }
+        /* Drain buffered data */
+        psb_csync();
+        dsbnsh();

         return 0;
 }
...
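Since spe_enable() and spe_disable() now guard themselves with spe_supported(), a caller can invoke them unconditionally and they degrade to no-ops on cores without SPE. An illustrative caller (the function name here is hypothetical, not part of this patch):

#include <spe.h>

/* Hypothetical EL3 setup path: safe to call whether or not SPE exists. */
void setup_statistical_profiling(int el2_unused)
{
        spe_enable(el2_unused);         /* returns immediately if SPE is absent */
}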
lib/extensions/sve/sve.c

 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
...
@@ -9,117 +9,120 @@
 #include <pubsub.h>
 #include <sve.h>

+int sve_supported(void)
+{
+        uint64_t features;
+
+        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
+        return (features & ID_AA64PFR0_SVE_MASK) == 1;
+}
+
 static void *disable_sve_hook(const void *arg)
 {
-        uint64_t features;
+        uint64_t cptr;

-        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-        if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
-                uint64_t cptr;
+        if (sve_supported() == 0)
+                return (void *)-1;

         /*
          * Disable SVE, SIMD and FP access for the Secure world.
          * As the SIMD/FP registers are part of the SVE Z-registers, any
          * use of SIMD/FP functionality will corrupt the SVE registers.
          * Therefore it is necessary to prevent use of SIMD/FP support
          * in the Secure world as well as SVE functionality.
          */
         cptr = read_cptr_el3();
         cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
         write_cptr_el3(cptr);

         /*
          * No explicit ISB required here as ERET to switch to Secure
          * world covers it
          */
-        }
         return 0;
 }

 static void *enable_sve_hook(const void *arg)
 {
-        uint64_t features;
+        uint64_t cptr;

-        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-        if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
-                uint64_t cptr;
+        if (sve_supported() == 0)
+                return (void *)-1;

         /*
          * Enable SVE, SIMD and FP access for the Non-secure world.
          */
         cptr = read_cptr_el3();
         cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
         write_cptr_el3(cptr);

         /*
          * No explicit ISB required here as ERET to switch to Non-secure
          * world covers it
          */
-        }
         return 0;
 }

 void sve_enable(int el2_unused)
 {
-        uint64_t features;
+        uint64_t cptr;

-        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-        if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
-                uint64_t cptr;
+        if (sve_supported() == 0)
+                return;
+
 #if CTX_INCLUDE_FPREGS
         /*
          * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
          */
         assert(0);
 #endif
         /*
          * Update CPTR_EL3 to enable access to SVE functionality for the
          * Non-secure world.
          * NOTE - assumed that CPTR_EL3.TFP is set to allow access to
          * the SIMD, floating-point and SVE support.
          *
          * CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
          * in the Non-secure world.
          */
         cptr = read_cptr_el3();
         cptr |= CPTR_EZ_BIT;
         write_cptr_el3(cptr);

         /*
          * Need explicit ISB here to guarantee that update to ZCR_ELx
          * and CPTR_EL2.TZ do not result in trap to EL3.
          */
         isb();

         /*
          * Ensure lower ELs have access to full vector length.
          */
         write_zcr_el3(ZCR_EL3_LEN_MASK);

         if (el2_unused) {
                 /*
                  * Update CPTR_EL2 to enable access to SVE functionality
                  * for Non-secure world, EL2 and Non-secure EL1 and EL0.
                  * NOTE - assumed that CPTR_EL2.TFP is set to allow
                  * access to the SIMD, floating-point and SVE support.
                  *
                  * CPTR_EL2.TZ: Set to 0 to enable access to SVE support
                  * for EL2 and Non-secure EL1 and EL0.
                  */
                 cptr = read_cptr_el2();
                 cptr &= ~(CPTR_EL2_TZ_BIT);
                 write_cptr_el2(cptr);

                 /*
                  * Ensure lower ELs have access to full vector length.
                  */
                 write_zcr_el2(ZCR_EL2_LEN_MASK);
         }
-        }
         /*
          * No explicit ISB required here as ERET to switch to
          * Non-secure world covers it.
          */
 }

 SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
...
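The two pubsub hooks in sve.c flip the same pair of CPTR_EL3 bits in opposite directions: leaving the normal world traps SVE/SIMD/FP for the Secure side (TFP set, EZ cleared), and entering it restores Non-secure access (EZ set, TFP cleared). A condensed, illustrative rewrite of those two read-modify-write sequences (the accessors and bit macros are TF-A's):

#include <stdint.h>
#include <arch_helpers.h>       /* read_cptr_el3()/write_cptr_el3(); TFP_BIT, CPTR_EZ_BIT */

/* Condensed form of enable_sve_hook/disable_sve_hook above. */
static void sve_world_switch(int entering_nonsecure)
{
        uint64_t cptr = read_cptr_el3();

        if (entering_nonsecure)
                cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);       /* enable_sve_hook */
        else
                cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);       /* disable_sve_hook */

        write_cptr_el3(cptr);
}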