Arm Trusted Firmware
Commit 0f22bef3, authored Apr 29, 2017 by Scott Branden, committed by GitHub on Apr 29, 2017.

Merge branch 'integration' into tf_issue_461

Parents: 53d9c9c8, dd454b40
Changes: 132 files

include/lib/stdlib/assert.h
@@ -34,30 +34,27 @@
  * @(#)assert.h	8.2 (Berkeley) 1/21/94
  * $FreeBSD$
  */
 
+#include <sys/cdefs.h>
+
 /*
- * Unlike other ANSI header files, <assert.h> may usefully be included
- * multiple times, with and without NDEBUG defined.
+ * Portions copyright (c) 2017, ARM Limited and Contributors.
+ * All rights reserved.
  */
 
-#include <sys/cdefs.h>
-
-#undef assert
-#undef _assert
+#ifndef _ASSERT_H_
+#define _ASSERT_H_
 
-#ifdef NDEBUG
-#define assert(e)	((void)0)
-#define _assert(e)	((void)0)
-#else
+#if ENABLE_ASSERTIONS
 #define _assert(e)	assert(e)
 #define assert(e)	((e) ? (void)0 : __assert(__func__, __FILE__, \
 			    __LINE__, #e))
-#endif /* NDEBUG */
+#else
+#define assert(e)	((void)0)
+#define _assert(e)	((void)0)
+#endif /* ENABLE_ASSERTIONS */
 
-#ifndef _ASSERT_H_
-#define _ASSERT_H_
 __BEGIN_DECLS
 void __assert(const char *, const char *, int, const char *) __dead2;
 __END_DECLS
+
 #endif /* !_ASSERT_H_ */
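Note that the guard moves from the C-standard NDEBUG convention to the firmware-wide ENABLE_ASSERTIONS build flag, the same flag that now gates ASM_ASSERT in the assembly files further down, so a single switch controls both C and assembly assertions. A minimal sketch of the effect; the divide() helper is a made-up example, not code from this tree:

    /* Sketch: behavior of the reworked <assert.h>. */
    #include <assert.h>

    static int divide(int num, int den)
    {
            /*
             * Built with ENABLE_ASSERTIONS=1, a zero denominator calls
             * __assert() and never returns; with ENABLE_ASSERTIONS=0 the
             * macro expands to ((void)0) and den is not checked at all.
             */
            assert(den != 0);
            return num / den;
    }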
include/lib/stdlib/stdbool.h
New file (mode 100644).
/*
* Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __bool_true_false_are_defined
#define __bool_true_false_are_defined 1

#ifndef __cplusplus

#define false 0
#define true 1

#define bool _Bool
#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER)
typedef int _Bool;
#endif

#endif /* !__cplusplus */
#endif /* __bool_true_false_are_defined */
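This is the stock FreeBSD stdbool.h pattern: on C99 and newer toolchains bool maps to the built-in _Bool, while for pre-C99 GCC the header substitutes a plain int typedef so the same code still builds. A small usage sketch; the in_range() helper is illustrative:

    /* Sketch: code that builds identically under C89 and C99 toolchains. */
    #include <stdbool.h>

    static bool in_range(unsigned long addr, unsigned long base,
                         unsigned long size)
    {
            return (addr >= base) && ((addr - base) < size);
    }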
include/lib/xlat_tables/xlat_tables.h
@@ -108,7 +108,7 @@ typedef struct mmap_region {
 /* Generic translation table APIs */
 void init_xlat_tables(void);
 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-				size_t size, unsigned int attr);
+				size_t size, mmap_attr_t attr);
 void mmap_add(const mmap_region_t *mm);
 
 #endif /*__ASSEMBLY__*/
include/lib/xlat_tables/xlat_tables_v2.h
@@ -114,7 +114,7 @@ void init_xlat_tables(void);
  * be added before initializing the MMU and cannot be removed later.
  */
 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-			size_t size, unsigned int attr);
+			size_t size, mmap_attr_t attr);
 
 /*
  * Add a region with defined base PA and base VA. This type of region can be
@@ -128,7 +128,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
  * EPERM: It overlaps another region in an invalid way.
  */
 int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
-			size_t size, unsigned int attr);
+			size_t size, mmap_attr_t attr);
 
 /*
  * Add an array of static regions with defined base PA and base VA. This type
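In both library versions the attributes parameter tightens from a bare unsigned int to the enum-backed mmap_attr_t, so callers pass the MT_* flags through a typed interface. A hedged usage sketch; the UART base, size, and function name are hypothetical, not taken from this commit:

    /* Sketch: mapping a device region with the typed attribute parameter. */
    #include <xlat_tables_v2.h>

    #define PLAT_UART_BASE  0x1c090000ULL   /* hypothetical */
    #define PLAT_UART_SIZE  0x1000          /* hypothetical */

    static void plat_map_uart(void)
    {
            mmap_add_region(PLAT_UART_BASE, PLAT_UART_BASE, PLAT_UART_SIZE,
                            MT_DEVICE | MT_RW | MT_SECURE);
    }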
include/plat/arm/board/common/v2m_def.h
@@ -30,7 +30,7 @@
 #ifndef __V2M_DEF_H__
 #define __V2M_DEF_H__
 
-#include <xlat_tables_v2.h>
+#include <arm_xlat_tables.h>
 
 /* V2M motherboard system registers & offsets */
include/plat/arm/common/arm_xlat_tables.h
New file (mode 100644).
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#if ARM_XLAT_TABLES_LIB_V1
#include <xlat_tables.h>
#else
#include <xlat_tables_v2.h>
#endif /* ARM_XLAT_TABLES_LIB_V1 */
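With the wrapper in place, common ARM platform code includes arm_xlat_tables.h and compiles against whichever library the ARM_XLAT_TABLES_LIB_V1 build option selects. A hedged sketch of a shared memory-map table; the DRAM constants are placeholders:

    /* Sketch: one region table usable with either xlat-tables version. */
    #include <arm_xlat_tables.h>

    #define PLAT_DRAM_BASE  0x80000000ULL   /* hypothetical */
    #define PLAT_DRAM_SIZE  0x40000000      /* hypothetical */

    static const mmap_region_t plat_mmap[] = {
            MAP_REGION_FLAT(PLAT_DRAM_BASE, PLAT_DRAM_SIZE,
                            MT_MEMORY | MT_RW | MT_NS),
            {0}     /* list terminator */
    };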
include/plat/arm/common/plat_arm.h
@@ -30,6 +30,7 @@
 #ifndef __PLAT_ARM_H__
 #define __PLAT_ARM_H__
 
+#include <arm_xlat_tables.h>
 #include <bakery_lock.h>
 #include <cassert.h>
 #include <cpu_data.h>
@@ -80,7 +81,7 @@ void arm_setup_page_tables(uintptr_t total_base,
 #else
 
 /*
- * Empty macros for all other BL stages other than BL31
+ * Empty macros for all other BL stages other than BL31 and BL32
  */
 #define ARM_INSTANTIATE_LOCK
 #define arm_lock_init()
@@ -156,6 +157,7 @@ void arm_bl2_platform_setup(void);
 void arm_bl2_plat_arch_setup(void);
 uint32_t arm_get_spsr_for_bl32_entry(void);
 uint32_t arm_get_spsr_for_bl33_entry(void);
+int arm_bl2_handle_post_image_load(unsigned int image_id);
 
 /* BL2U utility functions */
 void arm_bl2u_early_platform_setup(struct meminfo *mem_layout,
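The newly declared arm_bl2_handle_post_image_load() exposes ARM's common post-image-load handling so individual ports can reuse it. A hedged sketch of a caller; the wrapper name and error policy here are illustrative, not the commit's own code:

    /* Sketch: a platform hook delegating to the common ARM helper. */
    #include <debug.h>
    #include <plat_arm.h>

    int plat_handle_post_image_load(unsigned int image_id) /* hypothetical hook */
    {
            int err = arm_bl2_handle_post_image_load(image_id);

            if (err != 0)
                    ERROR("Post-image-load failed for image %u\n", image_id);

            return err;
    }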
include/plat/arm/css/common/css_pm.h
 /*
- * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -35,11 +35,15 @@
 #include <psci.h>
 #include <types.h>
 
+/* System power domain at level 2, as currently implemented by CSS platforms */
+#define CSS_SYSTEM_PWR_DMN_LVL		ARM_PWR_LVL2
+
 /* Macros to read the CSS power domain state */
 #define CSS_CORE_PWR_STATE(state)	(state)->pwr_domain_state[ARM_PWR_LVL0]
 #define CSS_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[ARM_PWR_LVL1]
-#define CSS_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > ARM_PWR_LVL1) ?\
-				(state)->pwr_domain_state[ARM_PWR_LVL2] : 0)
+#define CSS_SYSTEM_PWR_STATE(state)	\
+			((PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) ?\
+			(state)->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] : 0)
 
 int css_pwr_domain_on(u_register_t mpidr);
 void css_pwr_domain_on_finish(const psci_power_state_t *target_state);
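Factoring the level out into CSS_SYSTEM_PWR_DMN_LVL also tightens the condition: the system state is only consulted when the platform's top power level is exactly the system domain. A hedged sketch of how a CSS power handler might use the macro; the helper name is made up, and ARM_LOCAL_STATE_OFF comes from the ARM standard platform headers:

    /* Sketch: checking for a full system power-off request. */
    static int css_system_going_off(const psci_power_state_t *target_state)
    {
            return CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF;
    }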
include/plat/arm/soc/common/soc_css_def.h
@@ -96,9 +96,16 @@
 /*
  * Required platform porting definitions common to all ARM CSS SoCs
  */
+#if JUNO_AARCH32_EL3_RUNTIME
+/*
+ * Following change is required to initialize TZC
+ * for enabling access to the HI_VECTOR (0xFFFF0000)
+ * location needed for JUNO AARCH32 support.
+ */
+#define PLAT_ARM_SCP_TZC_DRAM1_SIZE	ULL(0x8000)
+#else
 /* 2MB used for SCP DDR retraining */
 #define PLAT_ARM_SCP_TZC_DRAM1_SIZE	ULL(0x00200000)
+#endif
 
 #endif /* __SOC_CSS_DEF_H__ */
include/plat/common/platform.h
@@ -100,6 +100,7 @@ uintptr_t plat_get_my_stack(void);
 void plat_report_exception(unsigned int exception_type);
 int plat_crash_console_init(void);
 int plat_crash_console_putc(int c);
+int plat_crash_console_flush(void);
 void plat_error_handler(int err) __dead2;
 void plat_panic_handler(void) __dead2;
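The new plat_crash_console_flush() hook lets the crash-reporting path drain buffered output before the core is halted. A hedged C sketch for a memory-mapped UART; the register offsets and bit names are placeholders, and many ports implement this hook in assembly instead:

    /* Sketch: spin until a hypothetical UART has finished transmitting. */
    #include <mmio.h>

    #define PLAT_CRASH_UART_BASE  0x1c090000U  /* hypothetical */
    #define UART_FR               0x18U        /* hypothetical flag register */
    #define UART_FR_BUSY          (1U << 3)    /* hypothetical busy bit */

    int plat_crash_console_flush(void)
    {
            while ((mmio_read_32(PLAT_CRASH_UART_BASE + UART_FR) &
                    UART_FR_BUSY) != 0U)
                    ;       /* wait for the transmit path to go idle */

            return 0;
    }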
lib/aarch32/misc_helpers.S
@@ -162,7 +162,7 @@ endfunc zeromem
  * --------------------------------------------------------------------------
  */
 func memcpy4
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	orr	r3, r0, r1
 	tst	r3, #0x3
 	ASM_ASSERT(eq)
lib/aarch64/misc_helpers.S
@@ -215,7 +215,7 @@ func zeromem_dczva
 	tmp1	.req	x4
 	tmp2	.req	x5
 
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	/*
 	 * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
 	 * register value and panic if the MMU is disabled.
@@ -228,7 +228,7 @@ func zeromem_dczva
 	tst	tmp1, #SCTLR_M_BIT
 	ASM_ASSERT(ne)
-#endif /* ASM_ASSERTION */
+#endif /* ENABLE_ASSERTIONS */
 
 	/* stop_address is the address past the last to zero */
 	add	stop_address, cursor, length
@@ -247,7 +247,7 @@ func zeromem_dczva
 	mov	tmp2, #(1 << 2)
 	lsl	block_size, tmp2, block_size
 
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	/*
 	 * Assumes block size is at least 16 bytes to avoid manual realignment
 	 * of the cursor at the end of the DCZVA loop.
@@ -444,7 +444,7 @@ endfunc zeromem_dczva
  * --------------------------------------------------------------------------
  */
 func memcpy16
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	orr	x3, x0, x1
 	tst	x3, #0xf
 	ASM_ASSERT(eq)
lib/cpus/aarch32/aem_generic.S
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -35,7 +35,7 @@
 func aem_generic_core_pwr_dwn
 	/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	ldcopr	r0, SCTLR
 	tst	r0, #SCTLR_C_BIT
 	ASM_ASSERT(eq)
@@ -51,7 +51,7 @@ endfunc aem_generic_core_pwr_dwn
 func aem_generic_cluster_pwr_dwn
 	/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	ldcopr	r0, SCTLR
 	tst	r0, #SCTLR_C_BIT
 	ASM_ASSERT(eq)
lib/cpus/aarch32/cortex_a32.S
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -76,7 +76,7 @@ func cortex_a32_core_pwr_dwn
 	push	{r12, lr}
 
 	/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	ldcopr	r0, SCTLR
 	tst	r0, #SCTLR_C_BIT
 	ASM_ASSERT(eq)
@@ -107,7 +107,7 @@ func cortex_a32_cluster_pwr_dwn
 	push	{r12, lr}
 
 	/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	ldcopr	r0, SCTLR
 	tst	r0, #SCTLR_C_BIT
 	ASM_ASSERT(eq)
lib/cpus/aarch32/cortex_a53.S
New file (mode 100644).
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a53.h>
#include <cpu_macros.S>
#include <debug.h>

	/* ---------------------------------------------
	 * Disable intra-cluster coherency
	 * ---------------------------------------------
	 */
func cortex_a53_disable_smp
	ldcopr16	r0, r1, CPUECTLR
	bic64_imm	r0, r1, CPUECTLR_SMP_BIT
	stcopr16	r0, r1, CPUECTLR
	isb
	dsb	sy
	bx	lr
endfunc cortex_a53_disable_smp

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A53.
	 * -------------------------------------------------
	 */
func cortex_a53_reset_func
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * ---------------------------------------------
	 */
	ldcopr16	r0, r1, CPUECTLR
	orr64_imm	r0, r1, CPUECTLR_SMP_BIT
	stcopr16	r0, r1, CPUECTLR
	isb
	bx	lr
endfunc cortex_a53_reset_func

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Cortex-A53.
	 * ----------------------------------------------------
	 */
func cortex_a53_core_pwr_dwn
	push	{r12, lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	pop	{r12, lr}
	b	cortex_a53_disable_smp
endfunc cortex_a53_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Cortex-A53.
	 * Clobbers: r0 - r3
	 * -------------------------------------------------------
	 */
func cortex_a53_cluster_pwr_dwn
	push	{r12, lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Disable the optional ACP.
	 * ---------------------------------------------
	 */
	bl	plat_disable_acp

	/* ---------------------------------------------
	 * Flush L2 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level2

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	pop	{r12, lr}
	b	cortex_a53_disable_smp
endfunc cortex_a53_cluster_pwr_dwn

declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
	cortex_a53_reset_func, \
	cortex_a53_core_pwr_dwn, \
	cortex_a53_cluster_pwr_dwn
lib/cpus/aarch32/cortex_a57.S
New file (mode 100644).
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a57.h>
#include <cpu_macros.S>
#include <debug.h>

	/* ---------------------------------------------
	 * Disable intra-cluster coherency
	 * Clobbers: r0 - r1
	 * ---------------------------------------------
	 */
func cortex_a57_disable_smp
	ldcopr16	r0, r1, CPUECTLR
	bic64_imm	r0, r1, CPUECTLR_SMP_BIT
	stcopr16	r0, r1, CPUECTLR
	bx	lr
endfunc cortex_a57_disable_smp

	/* ---------------------------------------------
	 * Disable all types of L2 prefetches.
	 * Clobbers: r0 - r2
	 * ---------------------------------------------
	 */
func cortex_a57_disable_l2_prefetch
	ldcopr16	r0, r1, CPUECTLR
	orr64_imm	r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
	bic64_imm	r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
				CPUECTLR_L2_DPFTCH_DIST_MASK)
	stcopr16	r0, r1, CPUECTLR
	isb
	dsb	ish
	bx	lr
endfunc cortex_a57_disable_l2_prefetch

	/* ---------------------------------------------
	 * Disable debug interfaces
	 * ---------------------------------------------
	 */
func cortex_a57_disable_ext_debug
	mov	r0, #1
	stcopr	r0, DBGOSDLR
	isb
	dsb	sy
	bx	lr
endfunc cortex_a57_disable_ext_debug

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A57.
	 * -------------------------------------------------
	 */
func cortex_a57_reset_func
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * ---------------------------------------------
	 */
	ldcopr16	r0, r1, CPUECTLR
	orr64_imm	r0, r1, CPUECTLR_SMP_BIT
	stcopr16	r0, r1, CPUECTLR
	isb
	bx	lr
endfunc cortex_a57_reset_func

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Cortex-A57.
	 * ----------------------------------------------------
	 */
func cortex_a57_core_pwr_dwn
	push	{r12, lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Disable the L2 prefetches.
	 * ---------------------------------------------
	 */
	bl	cortex_a57_disable_l2_prefetch

	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	bl	cortex_a57_disable_smp

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	pop	{r12, lr}
	b	cortex_a57_disable_ext_debug
endfunc cortex_a57_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Cortex-A57.
	 * Clobbers: r0 - r3
	 * -------------------------------------------------------
	 */
func cortex_a57_cluster_pwr_dwn
	push	{r12, lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Disable the L2 prefetches.
	 * ---------------------------------------------
	 */
	bl	cortex_a57_disable_l2_prefetch

	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Disable the optional ACP.
	 * ---------------------------------------------
	 */
	bl	plat_disable_acp

	/* ---------------------------------------------
	 * Flush L2 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level2

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	bl	cortex_a57_disable_smp

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	pop	{r12, lr}
	b	cortex_a57_disable_ext_debug
endfunc cortex_a57_cluster_pwr_dwn

declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
	cortex_a57_reset_func, \
	cortex_a57_core_pwr_dwn, \
	cortex_a57_cluster_pwr_dwn
lib/cpus/aarch32/cortex_a72.S
New file (mode 100644).
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a72.h>
#include <cpu_macros.S>
#include <debug.h>

	/* ---------------------------------------------
	 * Disable all types of L2 prefetches.
	 * ---------------------------------------------
	 */
func cortex_a72_disable_l2_prefetch
	ldcopr16	r0, r1, CPUECTLR
	orr64_imm	r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
	bic64_imm	r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
				CPUECTLR_L2_DPFTCH_DIST_MASK)
	stcopr16	r0, r1, CPUECTLR
	isb
	bx	lr
endfunc cortex_a72_disable_l2_prefetch

	/* ---------------------------------------------
	 * Disable the load-store hardware prefetcher.
	 * ---------------------------------------------
	 */
func cortex_a72_disable_hw_prefetcher
	ldcopr16	r0, r1, CPUACTLR
	orr64_imm	r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
	stcopr16	r0, r1, CPUACTLR
	isb
	dsb	ish
	bx	lr
endfunc cortex_a72_disable_hw_prefetcher

	/* ---------------------------------------------
	 * Disable intra-cluster coherency
	 * Clobbers: r0 - r1
	 * ---------------------------------------------
	 */
func cortex_a72_disable_smp
	ldcopr16	r0, r1, CPUECTLR
	bic64_imm	r0, r1, CPUECTLR_SMP_BIT
	stcopr16	r0, r1, CPUECTLR
	bx	lr
endfunc cortex_a72_disable_smp

	/* ---------------------------------------------
	 * Disable debug interfaces
	 * ---------------------------------------------
	 */
func cortex_a72_disable_ext_debug
	mov	r0, #1
	stcopr	r0, DBGOSDLR
	isb
	dsb	sy
	bx	lr
endfunc cortex_a72_disable_ext_debug

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A72.
	 * -------------------------------------------------
	 */
func cortex_a72_reset_func
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * ---------------------------------------------
	 */
	ldcopr16	r0, r1, CPUECTLR
	orr64_imm	r0, r1, CPUECTLR_SMP_BIT
	stcopr16	r0, r1, CPUECTLR
	isb
	bx	lr
endfunc cortex_a72_reset_func

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Cortex-A72.
	 * ----------------------------------------------------
	 */
func cortex_a72_core_pwr_dwn
	push	{r12, lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Disable the L2 prefetches.
	 * ---------------------------------------------
	 */
	bl	cortex_a72_disable_l2_prefetch

	/* ---------------------------------------------
	 * Disable the load-store hardware prefetcher.
	 * ---------------------------------------------
	 */
	bl	cortex_a72_disable_hw_prefetcher

	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	bl	cortex_a72_disable_smp

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	pop	{r12, lr}
	b	cortex_a72_disable_ext_debug
endfunc cortex_a72_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Cortex-A72.
	 * -------------------------------------------------------
	 */
func cortex_a72_cluster_pwr_dwn
	push	{r12, lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Disable the L2 prefetches.
	 * ---------------------------------------------
	 */
	bl	cortex_a72_disable_l2_prefetch

	/* ---------------------------------------------
	 * Disable the load-store hardware prefetcher.
	 * ---------------------------------------------
	 */
	bl	cortex_a72_disable_hw_prefetcher

#if !SKIP_A72_L1_FLUSH_PWR_DWN
	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1
#endif

	/* ---------------------------------------------
	 * Disable the optional ACP.
	 * ---------------------------------------------
	 */
	bl	plat_disable_acp

	/* -------------------------------------------------
	 * Flush the L2 caches.
	 * -------------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level2

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	bl	cortex_a72_disable_smp

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	pop	{r12, lr}
	b	cortex_a72_disable_ext_debug
endfunc cortex_a72_cluster_pwr_dwn

declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
	cortex_a72_reset_func, \
	cortex_a72_core_pwr_dwn, \
	cortex_a72_cluster_pwr_dwn
lib/cpus/aarch32/cpu_helpers.S
@@ -53,7 +53,7 @@ func reset_handler
 	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
 	bl	get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	r0, #0
 	ASM_ASSERT(ne)
 #endif
@@ -92,7 +92,7 @@ func prepare_cpu_pwr_dwn
 	pop	{r2, lr}
 
 	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	r0, #0
 	ASM_ASSERT(ne)
 #endif
@@ -118,7 +118,7 @@ func init_cpu_ops
 	cmp	r1, #0
 	bne	1f
 	bl	get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	r0, #0
 	ASM_ASSERT(ne)
 #endif
lib/cpus/aarch64/cpu_helpers.S
@@ -55,7 +55,7 @@ func reset_handler
 	/* Get the matching cpu_ops pointer */
 	bl	get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	x0, #0
 	ASM_ASSERT(ne)
 #endif
@@ -94,7 +94,7 @@ func prepare_cpu_pwr_dwn
 	mrs	x1, tpidr_el3
 	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	x0, #0
 	ASM_ASSERT(ne)
 #endif
@@ -120,7 +120,7 @@ func init_cpu_ops
 	cbnz	x0, 1f
 	mov	x10, x30
 	bl	get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
 	cmp	x0, #0
 	ASM_ASSERT(ne)
 #endif
lib/psci/psci_on.c
@@ -165,7 +165,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
 	 */
 	psci_plat_pm_ops->pwr_domain_on_finish(state_info);
 
-#if !HW_ASSISTED_COHERENCY
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
 	/*
 	 * Arch. management: Enable data cache and manage stack memory
 	 */
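The widened condition means the warm-boot cache and stack maintenance is compiled out not only on hardware-coherent platforms but also when WARMBOOT_ENABLE_DCACHE_EARLY indicates the entrypoint already enabled the data cache. A hedged sketch of the guarded region's shape; the maintenance call follows the usual PSCI pattern and should be treated as illustrative context rather than the exact file contents:

    /* Sketch: the work this guard now skips in more configurations. */
    #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
            /*
             * Arch. management: enable the data cache and do the stack
             * maintenance that a cold cache would otherwise require.
             */
            psci_do_pwrup_cache_maintenance();
    #endif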