Unverified Commit f461da2a authored by davidcunado-arm, committed by GitHub

Merge pull request #1272 from dp-arm/dp/extensions

Refactor SPE/SVE code and fix some bugs in AMUv1 on AArch32
parents 322a98b6 700efdd1
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,6 +7,7 @@
 #ifndef __SPE_H__
 #define __SPE_H__
 
+int spe_supported(void);
 void spe_enable(int el2_unused);
 void spe_disable(void);
...
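The new spe_supported() declaration is the heart of this refactor: callers probe for the feature once and return early, instead of wrapping whole function bodies in a feature-check if block. A minimal sketch of the pattern, with hypothetical feature_* names standing in for the concrete SPE/SVE/AMU helpers:

	/* Sketch of the probe-and-guard pattern this series introduces.
	 * feature_id_field() and FEATURE_MASK are hypothetical stand-ins
	 * for ID-register reads such as
	 * read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT. */
	int feature_supported(void)
	{
		/* The ID register field reads 1 when the feature is implemented */
		return (feature_id_field() & FEATURE_MASK) == 1;
	}

	void feature_enable(int el2_unused)
	{
		if (feature_supported() == 0)
			return;	/* feature absent: nothing to program */
		/* ... program the EL2/EL3 controls as before ... */
	}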
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,6 +7,7 @@
 #ifndef __SVE_H__
 #define __SVE_H__
 
+int sve_supported(void);
 void sve_enable(int el2_unused);
 
 #endif /* __SVE_H__ */
@@ -30,7 +30,7 @@ int amu_supported(void)
 
 void amu_enable(int el2_unused)
 {
-	if (!amu_supported())
+	if (amu_supported() == 0)
 		return;
 
 	if (el2_unused) {
@@ -54,7 +54,7 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
 
 	return amu_group0_cnt_read_internal(idx);
@@ -63,7 +63,7 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
 
 	amu_group0_cnt_write_internal(idx, val);
@@ -73,7 +73,7 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
 
 	return amu_group1_cnt_read_internal(idx);
@@ -82,7 +82,7 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
 
 	amu_group1_cnt_write_internal(idx, val);
@@ -91,7 +91,7 @@ void amu_group1_cnt_write(int idx, uint64_t val)
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
 
 	amu_group1_set_evtype_internal(idx, val);
@@ -103,13 +103,14 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx;
 	int i;
 
-	if (!amu_supported())
+	if (amu_supported() == 0)
 		return (void *)-1;
 
 	ctx = &amu_ctxs[plat_my_core_pos()];
 
 	/* Assert that group 0 counter configuration is what we expect */
-	assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+	assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK &&
+	       read_amcntenset1() == AMU_GROUP1_COUNTERS_MASK);
 
 	/*
 	 * Disable group 0 counters to avoid other observers like SCP sampling
@@ -131,17 +132,15 @@ static void *amu_context_save(const void *arg)
 static void *amu_context_restore(const void *arg)
 {
 	struct amu_ctx *ctx;
-	uint64_t features;
 	int i;
 
-	features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
-	if ((features & ID_PFR0_AMU_MASK) != 1)
+	if (amu_supported() == 0)
 		return (void *)-1;
 
 	ctx = &amu_ctxs[plat_my_core_pos()];
 
 	/* Counters were disabled in `amu_context_save()` */
-	assert(read_amcntenset0() == 0);
+	assert(read_amcntenset0() == 0 && read_amcntenset1() == 0);
 
 	/* Restore group 0 counters */
 	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
...
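Note that the restore hook above previously re-read ID_PFR0 by hand while the save hook called amu_supported(); both now share one helper. The body of amu_supported() falls outside the hunks shown, but judging from the removed lines it plausibly reduces to:

	/* Hedged sketch, reusing the ID_PFR0_AMU_* names visible in the
	 * removed lines above; the real amu_supported() body is not in
	 * this diff. */
	int amu_supported(void)
	{
		uint64_t features;

		features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
		return (features & ID_PFR0_AMU_MASK) == 1;
	}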
@@ -18,7 +18,7 @@
  * uint64_t amu_group0_cnt_read_internal(int idx);
  *
  * Given `idx`, read the corresponding AMU counter
- * and return it in `r0`.
+ * and return it in `r0` and `r1`.
  */
 func amu_group0_cnt_read_internal
 #if ENABLE_ASSERTIONS
@@ -52,13 +52,15 @@ endfunc amu_group0_cnt_read_internal
  * void amu_group0_cnt_write_internal(int idx, uint64_t val);
  *
  * Given `idx`, write `val` to the corresponding AMU counter.
+ * `idx` is passed in `r0` and `val` is passed in `r2` and `r3`.
+ * `r1` is used as a scratch register.
  */
 func amu_group0_cnt_write_internal
 #if ENABLE_ASSERTIONS
 	/* `idx` should be between [0, 3] */
-	mov	r2, r0
-	lsr	r2, r2, #2
-	cmp	r2, #0
+	mov	r1, r0
+	lsr	r1, r1, #2
+	cmp	r1, #0
 	ASM_ASSERT(eq)
 #endif
@@ -66,19 +68,19 @@ func amu_group0_cnt_write_internal
 	 * Given `idx` calculate address of stcopr16/bx lr instruction pair
 	 * in the table below.
 	 */
-	adr	r2, 1f
+	adr	r1, 1f
 	lsl	r0, r0, #3	/* each stcopr16/bx lr sequence is 8 bytes */
-	add	r2, r2, r0
-	bx	r2
+	add	r1, r1, r0
+	bx	r1
 
 1:
-	stcopr16	r0,r1, AMEVCNTR00	/* index 0 */
+	stcopr16	r2, r3, AMEVCNTR00	/* index 0 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR01	/* index 1 */
+	stcopr16	r2, r3, AMEVCNTR01	/* index 1 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR02	/* index 2 */
+	stcopr16	r2, r3, AMEVCNTR02	/* index 2 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR03	/* index 3 */
+	stcopr16	r2, r3, AMEVCNTR03	/* index 3 */
 	bx	lr
 endfunc amu_group0_cnt_write_internal
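The computed branch in these helpers depends on each stcopr16/bx lr pair assembling to exactly 8 bytes, which is why `idx` is shifted left by 3 before being added to the table base. A C model of the adr/lsl/add/bx sequence:

	#include <stdint.h>

	/* Hypothetical C model of the jump-table dispatch above: entry
	 * `idx` sits 8 * idx bytes past the `1:` label, since every
	 * stcopr16/bx lr pair occupies 8 bytes. */
	static uintptr_t table_entry(uintptr_t table_base, int idx)
	{
		return table_base + ((uintptr_t)idx << 3);
	}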
@@ -86,14 +88,14 @@ endfunc amu_group0_cnt_write_internal
  * uint64_t amu_group1_cnt_read_internal(int idx);
  *
  * Given `idx`, read the corresponding AMU counter
- * and return it in `r0`.
+ * and return it in `r0` and `r1`.
  */
 func amu_group1_cnt_read_internal
 #if ENABLE_ASSERTIONS
 	/* `idx` should be between [0, 15] */
-	mov	r2, r0
-	lsr	r2, r2, #4
-	cmp	r2, #0
+	mov	r1, r0
+	lsr	r1, r1, #4
+	cmp	r1, #0
 	ASM_ASSERT(eq)
 #endif
@@ -107,37 +109,37 @@ func amu_group1_cnt_read_internal
 	bx	r1
 
 1:
-	ldcopr16	r0,r1, AMEVCNTR10	/* index 0 */
+	ldcopr16	r0, r1, AMEVCNTR10	/* index 0 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR11	/* index 1 */
+	ldcopr16	r0, r1, AMEVCNTR11	/* index 1 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR12	/* index 2 */
+	ldcopr16	r0, r1, AMEVCNTR12	/* index 2 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR13	/* index 3 */
+	ldcopr16	r0, r1, AMEVCNTR13	/* index 3 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR14	/* index 4 */
+	ldcopr16	r0, r1, AMEVCNTR14	/* index 4 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR15	/* index 5 */
+	ldcopr16	r0, r1, AMEVCNTR15	/* index 5 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR16	/* index 6 */
+	ldcopr16	r0, r1, AMEVCNTR16	/* index 6 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR17	/* index 7 */
+	ldcopr16	r0, r1, AMEVCNTR17	/* index 7 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR18	/* index 8 */
+	ldcopr16	r0, r1, AMEVCNTR18	/* index 8 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR19	/* index 9 */
+	ldcopr16	r0, r1, AMEVCNTR19	/* index 9 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR1A	/* index 10 */
+	ldcopr16	r0, r1, AMEVCNTR1A	/* index 10 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR1B	/* index 11 */
+	ldcopr16	r0, r1, AMEVCNTR1B	/* index 11 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR1C	/* index 12 */
+	ldcopr16	r0, r1, AMEVCNTR1C	/* index 12 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR1D	/* index 13 */
+	ldcopr16	r0, r1, AMEVCNTR1D	/* index 13 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR1E	/* index 14 */
+	ldcopr16	r0, r1, AMEVCNTR1E	/* index 14 */
 	bx	lr
-	ldcopr16	r0,r1, AMEVCNTR1F	/* index 15 */
+	ldcopr16	r0, r1, AMEVCNTR1F	/* index 15 */
 	bx	lr
 endfunc amu_group1_cnt_read_internal
@@ -145,13 +147,15 @@ endfunc amu_group1_cnt_read_internal
  * void amu_group1_cnt_write_internal(int idx, uint64_t val);
  *
  * Given `idx`, write `val` to the corresponding AMU counter.
+ * `idx` is passed in `r0` and `val` is passed in `r2` and `r3`.
+ * `r1` is used as a scratch register.
  */
 func amu_group1_cnt_write_internal
 #if ENABLE_ASSERTIONS
 	/* `idx` should be between [0, 15] */
-	mov	r2, r0
-	lsr	r2, r2, #4
-	cmp	r2, #0
+	mov	r1, r0
+	lsr	r1, r1, #4
+	cmp	r1, #0
 	ASM_ASSERT(eq)
 #endif
@@ -159,43 +163,43 @@ func amu_group1_cnt_write_internal
 	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
 	 * in the table below.
 	 */
-	adr	r2, 1f
+	adr	r1, 1f
 	lsl	r0, r0, #3	/* each stcopr16/bx lr sequence is 8 bytes */
-	add	r2, r2, r0
-	bx	r2
+	add	r1, r1, r0
+	bx	r1
 
 1:
-	stcopr16	r0,r1, AMEVCNTR10	/* index 0 */
+	stcopr16	r2, r3, AMEVCNTR10	/* index 0 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR11	/* index 1 */
+	stcopr16	r2, r3, AMEVCNTR11	/* index 1 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR12	/* index 2 */
+	stcopr16	r2, r3, AMEVCNTR12	/* index 2 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR13	/* index 3 */
+	stcopr16	r2, r3, AMEVCNTR13	/* index 3 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR14	/* index 4 */
+	stcopr16	r2, r3, AMEVCNTR14	/* index 4 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR15	/* index 5 */
+	stcopr16	r2, r3, AMEVCNTR15	/* index 5 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR16	/* index 6 */
+	stcopr16	r2, r3, AMEVCNTR16	/* index 6 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR17	/* index 7 */
+	stcopr16	r2, r3, AMEVCNTR17	/* index 7 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR18	/* index 8 */
+	stcopr16	r2, r3, AMEVCNTR18	/* index 8 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR19	/* index 9 */
+	stcopr16	r2, r3, AMEVCNTR19	/* index 9 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR1A	/* index 10 */
+	stcopr16	r2, r3, AMEVCNTR1A	/* index 10 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR1B	/* index 11 */
+	stcopr16	r2, r3, AMEVCNTR1B	/* index 11 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR1C	/* index 12 */
+	stcopr16	r2, r3, AMEVCNTR1C	/* index 12 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR1D	/* index 13 */
+	stcopr16	r2, r3, AMEVCNTR1D	/* index 13 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR1E	/* index 14 */
+	stcopr16	r2, r3, AMEVCNTR1E	/* index 14 */
 	bx	lr
-	stcopr16	r0,r1, AMEVCNTR1F	/* index 15 */
+	stcopr16	r2, r3, AMEVCNTR1F	/* index 15 */
 	bx	lr
 endfunc amu_group1_cnt_write_internal
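The register renumbering in both write helpers is the AArch32 bug fix named in the merge title. Under the AAPCS, a 64-bit argument must start in an even-numbered register, so with `idx` in r0, r1 is skipped and `val` arrives in the r2/r3 pair; the old code stored r0/r1, i.e. the index plus an undefined r1, instead of `val`:

	/* AAPCS argument mapping on AArch32:
	 *
	 *   void amu_group1_cnt_write_internal(int idx, uint64_t val);
	 *     idx -> r0
	 *     r1 skipped (64-bit args are aligned to even register pairs)
	 *     val -> r2 (low word) and r3 (high word)
	 *
	 * Hence stcopr16 r2, r3, ... stores `val`, and r1 is free to be
	 * used as the scratch register for the table dispatch. */

The same reasoning explains the set_evtype hunk below: its `val` is a 32-bit unsigned int and therefore lands in r1, which is why stcopr now stores r1 rather than r0.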
@@ -230,36 +234,36 @@ func amu_group1_set_evtype_internal
 	bx	r2
 
 1:
-	stcopr	r0, AMEVTYPER10	/* index 0 */
+	stcopr	r1, AMEVTYPER10	/* index 0 */
 	bx	lr
-	stcopr	r0, AMEVTYPER11	/* index 1 */
+	stcopr	r1, AMEVTYPER11	/* index 1 */
 	bx	lr
-	stcopr	r0, AMEVTYPER12	/* index 2 */
+	stcopr	r1, AMEVTYPER12	/* index 2 */
 	bx	lr
-	stcopr	r0, AMEVTYPER13	/* index 3 */
+	stcopr	r1, AMEVTYPER13	/* index 3 */
 	bx	lr
-	stcopr	r0, AMEVTYPER14	/* index 4 */
+	stcopr	r1, AMEVTYPER14	/* index 4 */
 	bx	lr
-	stcopr	r0, AMEVTYPER15	/* index 5 */
+	stcopr	r1, AMEVTYPER15	/* index 5 */
 	bx	lr
-	stcopr	r0, AMEVTYPER16	/* index 6 */
+	stcopr	r1, AMEVTYPER16	/* index 6 */
 	bx	lr
-	stcopr	r0, AMEVTYPER17	/* index 7 */
+	stcopr	r1, AMEVTYPER17	/* index 7 */
 	bx	lr
-	stcopr	r0, AMEVTYPER18	/* index 8 */
+	stcopr	r1, AMEVTYPER18	/* index 8 */
 	bx	lr
-	stcopr	r0, AMEVTYPER19	/* index 9 */
+	stcopr	r1, AMEVTYPER19	/* index 9 */
 	bx	lr
-	stcopr	r0, AMEVTYPER1A	/* index 10 */
+	stcopr	r1, AMEVTYPER1A	/* index 10 */
 	bx	lr
-	stcopr	r0, AMEVTYPER1B	/* index 11 */
+	stcopr	r1, AMEVTYPER1B	/* index 11 */
 	bx	lr
-	stcopr	r0, AMEVTYPER1C	/* index 12 */
+	stcopr	r1, AMEVTYPER1C	/* index 12 */
 	bx	lr
-	stcopr	r0, AMEVTYPER1D	/* index 13 */
+	stcopr	r1, AMEVTYPER1D	/* index 13 */
 	bx	lr
-	stcopr	r0, AMEVTYPER1E	/* index 14 */
+	stcopr	r1, AMEVTYPER1E	/* index 14 */
 	bx	lr
-	stcopr	r0, AMEVTYPER1F	/* index 15 */
+	stcopr	r1, AMEVTYPER1F	/* index 15 */
 	bx	lr
 endfunc amu_group1_set_evtype_internal
@@ -37,7 +37,7 @@ void amu_enable(int el2_unused)
 {
 	uint64_t v;
 
-	if (!amu_supported())
+	if (amu_supported() == 0)
 		return;
 
 	if (el2_unused) {
@@ -67,7 +67,7 @@ void amu_enable(int el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 uint64_t amu_group0_cnt_read(int idx)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
 
 	return amu_group0_cnt_read_internal(idx);
@@ -76,7 +76,7 @@ uint64_t amu_group0_cnt_read(int idx)
 /* Write the group 0 counter identified by the given `idx` with `val`. */
 void amu_group0_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
 
 	amu_group0_cnt_write_internal(idx, val);
@@ -86,7 +86,7 @@ void amu_group0_cnt_write(int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx`. */
 uint64_t amu_group1_cnt_read(int idx)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
 
 	return amu_group1_cnt_read_internal(idx);
@@ -95,7 +95,7 @@ uint64_t amu_group1_cnt_read(int idx)
 /* Write the group 1 counter identified by the given `idx` with `val`. */
 void amu_group1_cnt_write(int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
 
 	amu_group1_cnt_write_internal(idx, val);
@@ -108,7 +108,7 @@ void amu_group1_cnt_write(int idx, uint64_t val)
  */
 void amu_group1_set_evtype(int idx, unsigned int val)
 {
-	assert(amu_supported());
+	assert(amu_supported() != 0);
 	assert (idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
 
 	amu_group1_set_evtype_internal(idx, val);
@@ -120,7 +120,7 @@ static void *amu_context_save(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	int i;
 
-	if (!amu_supported())
+	if (amu_supported() == 0)
 		return (void *)-1;
 
 	/* Assert that group 0/1 counter configuration is what we expect */
@@ -154,7 +154,7 @@ static void *amu_context_restore(const void *arg)
 	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
 	int i;
 
-	if (!amu_supported())
+	if (amu_supported() == 0)
 		return (void *)-1;
 
 	/* Counters were disabled in `amu_context_save()` */
...
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -14,14 +14,21 @@
  */
 #define psb_csync()	asm volatile("hint #17")
 
-void spe_enable(int el2_unused)
+int spe_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
-		uint64_t v;
+	return (features & ID_AA64DFR0_PMS_MASK) == 1;
+}
 
+void spe_enable(int el2_unused)
+{
+	uint64_t v;
+
+	if (spe_supported() == 0)
+		return;
+
 	if (el2_unused) {
 		/*
 		 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
@@ -45,17 +52,15 @@ void spe_enable(int el2_unused)
 	v = read_mdcr_el3();
 	v |= MDCR_NSPB(MDCR_NSPB_EL1);
 	write_mdcr_el3(v);
-	}
 }
 
 void spe_disable(void)
 {
-	uint64_t features;
-
-	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
-		uint64_t v;
+	uint64_t v;
+
+	if (spe_supported() == 0)
+		return;
 
 	/* Drain buffered data */
 	psb_csync();
 	dsbnsh();
@@ -65,20 +70,16 @@ void spe_disable(void)
 	v &= ~(1ULL << 0);
 	write_pmblimitr_el1(v);
 	isb();
-	}
 }
 
 static void *spe_drain_buffers_hook(const void *arg)
 {
-	uint64_t features;
-
-	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+	if (spe_supported() == 0)
+		return (void *)-1;
+
 	/* Drain buffered data */
 	psb_csync();
 	dsbnsh();
-	}
 
 	return 0;
 }
...
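psb_csync() is defined as hint #17 because PSB CSYNC sits in the hint instruction space: assemblers that predate SPE still accept the encoding, and cores without SPE execute it as a NOP. The drain idiom that spe_disable() and the hook share is then simply:

	/* Sketch of the shared drain sequence, using the names from the
	 * diff above; spe_drain() itself is a hypothetical wrapper that
	 * does not appear in the source. */
	static void spe_drain(void)
	{
		psb_csync();	/* push buffered profiling records toward memory */
		dsbnsh();	/* wait for those writes to complete */
	}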
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -9,14 +9,21 @@
 #include <pubsub.h>
 #include <sve.h>
 
-static void *disable_sve_hook(const void *arg)
+int sve_supported(void)
 {
 	uint64_t features;
 
 	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-	if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
-		uint64_t cptr;
+	return (features & ID_AA64PFR0_SVE_MASK) == 1;
+}
 
+static void *disable_sve_hook(const void *arg)
+{
+	uint64_t cptr;
+
+	if (sve_supported() == 0)
+		return (void *)-1;
+
 	/*
 	 * Disable SVE, SIMD and FP access for the Secure world.
 	 * As the SIMD/FP registers are part of the SVE Z-registers, any
@@ -32,18 +39,16 @@ static void *disable_sve_hook(const void *arg)
 	 * No explicit ISB required here as ERET to switch to Secure
 	 * world covers it
 	 */
-	}
 
 	return 0;
 }
 
 static void *enable_sve_hook(const void *arg)
 {
-	uint64_t features;
-
-	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-	if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
-		uint64_t cptr;
+	uint64_t cptr;
+
+	if (sve_supported() == 0)
+		return (void *)-1;
 
 	/*
 	 * Enable SVE, SIMD and FP access for the Non-secure world.
 	 */
@@ -55,17 +60,16 @@ static void *enable_sve_hook(const void *arg)
 	 * No explicit ISB required here as ERET to switch to Non-secure
 	 * world covers it
 	 */
-	}
 
 	return 0;
 }
 
 void sve_enable(int el2_unused)
 {
-	uint64_t features;
-
-	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
-	if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
-		uint64_t cptr;
+	uint64_t cptr;
+
+	if (sve_supported() == 0)
+		return;
+
 #if CTX_INCLUDE_FPREGS
 	/*
 	 * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
@@ -119,7 +123,6 @@ void sve_enable(int el2_unused)
 	 * No explicit ISB required here as ERET to switch to
 	 * Non-secure world covers it.
 	 */
-	}
 }
 
 SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
...
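Both SVE hooks perform a read-modify-write on CPTR_EL3 through the `cptr` local once the guard passes. The enable side plausibly looks like the sketch below; CPTR_EZ_BIT is assumed from TF-A naming conventions and does not appear in the hunks shown:

	/* Hedged sketch of enable_sve_hook's body after the guard. */
	uint64_t cptr = read_cptr_el3();
	cptr |= CPTR_EZ_BIT;	/* stop trapping SVE instructions at EL3 */
	write_cptr_el3(cptr);
	/* No ISB needed: the ERET into the Non-secure world synchronizes. */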