Commit 44abeaa6
Authored Sep 22, 2016 by danh-arm, committed by GitHub on Sep 22, 2016

Merge pull request #713 from yatharth-arm/yk/AArch32_porting

Add basic AArch32 support for BL1 & BL2

Parents: 131f7cd4, 03a3042b
Changes: 60
common/aarch32/debug.S

@@ -32,6 +32,7 @@
 #include <asm_macros.S>

 	.globl	do_panic
+	.globl	report_exception

 /***********************************************************
  * The common implementation of do_panic for all BL stages
@@ -40,3 +41,14 @@ func do_panic
 	b	plat_panic_handler
 endfunc do_panic

+/***********************************************************
+ * This function is called from the vector table for
+ * unhandled exceptions. It reads the current mode and
+ * passes it to platform.
+ ***********************************************************/
+func report_exception
+	mrs	r0, cpsr
+	and	r0, #MODE32_MASK
+	bl	plat_report_exception
+	bl	plat_panic_handler
+endfunc report_exception
common/bl_common.c

@@ -53,10 +53,7 @@ uintptr_t page_align(uintptr_t value, unsigned dir)
 	return value;
 }

-static inline unsigned int is_page_aligned(uintptr_t addr) {
-	return (addr & (PAGE_SIZE - 1)) == 0;
-}
-
+#if !LOAD_IMAGE_V2
 /******************************************************************************
  * Determine whether the memory region delimited by 'addr' and 'size' is free,
  * given the extents of free memory.
@@ -179,6 +176,7 @@ static void dump_load_info(uintptr_t image_load_addr,
 	INFO(" free region = [base = %p, size = 0x%zx]\n",
 		(void *) mem_layout->free_base, mem_layout->free_size);
 }
+#endif /* LOAD_IMAGE_V2 */

 /* Generic function to return the size of an image */
 size_t image_size(unsigned int image_id)
@@ -223,6 +221,156 @@ size_t image_size(unsigned int image_id)
 	return image_size;
 }

+#if LOAD_IMAGE_V2
+
+/*******************************************************************************
+ * Generic function to load an image at a specific address given
+ * an image ID and extents of free memory.
+ *
+ * If the load is successful then the image information is updated.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+int load_image(unsigned int image_id, image_info_t *image_data)
+{
+	uintptr_t dev_handle;
+	uintptr_t image_handle;
+	uintptr_t image_spec;
+	uintptr_t image_base;
+	size_t image_size;
+	size_t bytes_read;
+	int io_result;
+
+	assert(image_data != NULL);
+	assert(image_data->h.version >= VERSION_2);
+
+	image_base = image_data->image_base;
+
+	/* Obtain a reference to the image by querying the platform layer */
+	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+	if (io_result != 0) {
+		WARN("Failed to obtain reference to image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	/* Attempt to access the image */
+	io_result = io_open(dev_handle, image_spec, &image_handle);
+	if (io_result != 0) {
+		WARN("Failed to access image id=%u (%i)\n",
+			image_id, io_result);
+		return io_result;
+	}
+
+	INFO("Loading image id=%u at address %p\n", image_id,
+		(void *) image_base);
+
+	/* Find the size of the image */
+	io_result = io_size(image_handle, &image_size);
+	if ((io_result != 0) || (image_size == 0)) {
+		WARN("Failed to determine the size of the image id=%u (%i)\n",
+			image_id, io_result);
+		goto exit;
+	}
+
+	/* Check that the image size to load is within limit */
+	if (image_size > image_data->image_max_size) {
+		WARN("Image id=%u size out of bounds\n", image_id);
+		io_result = -EFBIG;
+		goto exit;
+	}
+
+	image_data->image_size = image_size;
+
+	/* We have enough space so load the image now */
+	/* TODO: Consider whether to try to recover/retry a partially successful read */
+	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+	if ((io_result != 0) || (bytes_read < image_size)) {
+		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+		goto exit;
+	}
+
+#if !TRUSTED_BOARD_BOOT
+	/*
+	 * File has been successfully loaded.
+	 * Flush the image to main memory so that it can be executed later by
+	 * any CPU, regardless of cache and MMU state.
+	 * When TBB is enabled the image is flushed later, after image
+	 * authentication.
+	 */
+	flush_dcache_range(image_base, image_size);
+#endif /* TRUSTED_BOARD_BOOT */
+
+	INFO("Image id=%u loaded: %p - %p\n", image_id, (void *) image_base,
+		(void *) (image_base + image_size));
+
+exit:
+	io_close(image_handle);
+	/* Ignore improbable/unrecoverable error in 'close' */
+
+	/* TODO: Consider maintaining open device connection from this bootloader stage */
+	io_dev_close(dev_handle);
+	/* Ignore improbable/unrecoverable error in 'dev_close' */
+
+	return io_result;
+}
+
+/*******************************************************************************
+ * Generic function to load and authenticate an image. The image is actually
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust.
+ ******************************************************************************/
+int load_auth_image(unsigned int image_id, image_info_t *image_data)
+{
+	int rc;
+
+#if TRUSTED_BOARD_BOOT
+	unsigned int parent_id;
+
+	/* Use recursion to authenticate parent images */
+	rc = auth_mod_get_parent_id(image_id, &parent_id);
+	if (rc == 0) {
+		rc = load_auth_image(parent_id, image_data);
+		if (rc != 0) {
+			return rc;
+		}
+	}
+#endif /* TRUSTED_BOARD_BOOT */
+
+	/* Load the image */
+	rc = load_image(image_id, image_data);
+	if (rc != 0) {
+		return rc;
+	}
+
+#if TRUSTED_BOARD_BOOT
+	/* Authenticate it */
+	rc = auth_mod_verify_img(image_id,
+				 (void *)image_data->image_base,
+				 image_data->image_size);
+	if (rc != 0) {
+		memset((void *)image_data->image_base, 0x00,
+		       image_data->image_size);
+		flush_dcache_range(image_data->image_base,
+				   image_data->image_size);
+		return -EAUTH;
+	}
+
+	/*
+	 * File has been successfully loaded and authenticated.
+	 * Flush the image to main memory so that it can be executed later by
+	 * any CPU, regardless of cache and MMU state.
+	 */
+	flush_dcache_range(image_data->image_base, image_data->image_size);
+#endif /* TRUSTED_BOARD_BOOT */
+
+	return 0;
+}
+
+#else /* LOAD_IMAGE_V2 */
+
 /*******************************************************************************
  * Generic function to load an image at a specific address given an image ID and
  * extents of free memory.
@@ -255,7 +403,7 @@ int load_image(meminfo_t *mem_layout,
 	assert(mem_layout != NULL);
 	assert(image_data != NULL);
-	assert(image_data->h.version >= VERSION_1);
+	assert(image_data->h.version == VERSION_1);

 	/* Obtain a reference to the image by querying the platform layer */
 	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
@@ -348,8 +496,10 @@ exit:
 /*******************************************************************************
  * Generic function to load and authenticate an image. The image is actually
- * loaded by calling the 'load_image()' function. In addition, this function
- * uses recursion to authenticate the parent images up to the root of trust.
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust.
 ******************************************************************************/
 int load_auth_image(meminfo_t *mem_layout,
 		    unsigned int image_id,
@@ -403,6 +553,8 @@ int load_auth_image(meminfo_t *mem_layout,
 	return 0;
 }

+#endif /* LOAD_IMAGE_V2 */
+
 /*******************************************************************************
  * Print the content of an entry_point_info_t structure.
  ******************************************************************************/
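Editor's note: the v2 load_image()/load_auth_image() pair above replaces the meminfo-based interface with one keyed only on an image ID and an image_info_t that carries its own size limit. As a minimal sketch (not part of this commit), a BL2 port could drive it as below; BL33_BASE and BL33_LIMIT are hypothetical placeholders for whatever the platform defines.

/* Hedged sketch: loading one image through the LOAD_IMAGE_V2 interface.
 * BL33_BASE/BL33_LIMIT are assumed platform-defined addresses. */
#include <bl_common.h>
#include <debug.h>
#include <errno.h>

static int bl2_load_bl33_sketch(void)
{
	image_info_t bl33_image_info;
	int rc;

	/* Version 2 header is required by the new load_image() */
	SET_PARAM_HEAD(&bl33_image_info, PARAM_IMAGE_BINARY, VERSION_2, 0);
	bl33_image_info.image_base = BL33_BASE;			/* placeholder */
	bl33_image_info.image_max_size = BL33_LIMIT - BL33_BASE;	/* placeholder */

	rc = load_auth_image(BL33_IMAGE_ID, &bl33_image_info);
	if (rc == -EFBIG)
		ERROR("BL33 does not fit in the space reserved for it\n");
	else if (rc != 0)
		ERROR("Failed to load BL33 (%i)\n", rc);

	/* On success, bl33_image_info.image_size holds the bytes read */
	return rc;
}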
common/desc_image_load.c (new file, mode 100644)
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <desc_image_load.h>

extern bl_mem_params_node_t *bl_mem_params_desc_ptr;
extern unsigned int bl_mem_params_desc_num;

static bl_load_info_t bl_load_info;
static bl_params_t next_bl_params;


/*******************************************************************************
 * This function flushes the data structures so that they are visible
 * in memory for the next BL image.
 ******************************************************************************/
void flush_bl_params_desc(void)
{
	flush_dcache_range((unsigned long)bl_mem_params_desc_ptr,
		sizeof(*bl_mem_params_desc_ptr) * bl_mem_params_desc_num);
}

/*******************************************************************************
 * This function returns the index for given image_id, within the
 * image descriptor array provided by bl_image_info_descs_ptr, if the
 * image is found else it returns -1.
 ******************************************************************************/
int get_bl_params_node_index(unsigned int image_id)
{
	int index;
	assert(image_id != INVALID_IMAGE_ID);

	for (index = 0; index < bl_mem_params_desc_num; index++) {
		if (bl_mem_params_desc_ptr[index].image_id == image_id)
			return index;
	}

	return -1;
}

/*******************************************************************************
 * This function returns the pointer to `bl_mem_params_node_t` object for
 * given image_id, within the image descriptor array provided by
 * bl_mem_params_desc_ptr, if the image is found else it returns NULL.
 ******************************************************************************/
bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id)
{
	int index;
	assert(image_id != INVALID_IMAGE_ID);

	index = get_bl_params_node_index(image_id);
	if (index >= 0)
		return &bl_mem_params_desc_ptr[index];
	else
		return NULL;
}

/*******************************************************************************
 * This function creates the list of loadable images, by populating and
 * linking each `bl_load_info_node_t` type node, using the internal array
 * of image descriptor provided by bl_mem_params_desc_ptr. It also populates
 * and returns `bl_load_info_t` type structure that contains head of the list
 * of loadable images.
 ******************************************************************************/
bl_load_info_t *get_bl_load_info_from_mem_params_desc(void)
{
	int index = 0;

	/* If there is no image to start with, return NULL */
	if (!bl_mem_params_desc_num)
		return NULL;

	/* Assign initial data structures */
	bl_load_info_node_t *bl_node_info =
		&bl_mem_params_desc_ptr[index].load_node_mem;
	bl_load_info.head = bl_node_info;
	SET_PARAM_HEAD(&bl_load_info, PARAM_BL_LOAD_INFO, VERSION_2, 0);

	/* Go through the image descriptor array and create the list */
	for (; index < bl_mem_params_desc_num; index++) {

		/* Populate the image information */
		bl_node_info->image_id = bl_mem_params_desc_ptr[index].image_id;
		bl_node_info->image_info = &bl_mem_params_desc_ptr[index].image_info;

		/* Link next image if present */
		if ((index + 1) < bl_mem_params_desc_num) {
			/* Get the memory and link the next node */
			bl_node_info->next_load_info =
				&bl_mem_params_desc_ptr[index + 1].load_node_mem;
			bl_node_info = bl_node_info->next_load_info;
		}
	}

	return &bl_load_info;
}

/*******************************************************************************
 * This function creates the list of executable images, by populating and
 * linking each `bl_params_node_t` type node, using the internal array of
 * image descriptor provided by bl_mem_params_desc_ptr. It also populates
 * and returns `bl_params_t` type structure that contains head of the list
 * of executable images.
 ******************************************************************************/
bl_params_t *get_next_bl_params_from_mem_params_desc(void)
{
	int count;
	unsigned int img_id = 0;
	int link_index = 0;
	bl_params_node_t *bl_current_exec_node = NULL;
	bl_params_node_t *bl_last_exec_node = NULL;
	bl_mem_params_node_t *desc_ptr;

	/* If there is no image to start with, return NULL */
	if (!bl_mem_params_desc_num)
		return NULL;

	/* Get the list HEAD */
	for (count = 0; count < bl_mem_params_desc_num; count++) {

		desc_ptr = &bl_mem_params_desc_ptr[count];

		if ((EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE) &&
			(EP_GET_FIRST_EXE(desc_ptr->ep_info.h.attr) == EP_FIRST_EXE)) {
			next_bl_params.head = &desc_ptr->params_node_mem;
			link_index = count;
			break;
		}
	}

	/* Make sure we have a HEAD node */
	assert(next_bl_params.head != NULL);

	/* Populate the HEAD information */
	SET_PARAM_HEAD(&next_bl_params, PARAM_BL_PARAMS, VERSION_2, 0);

	/*
	 * Go through the image descriptor array and create the list.
	 * This bounded loop is to make sure that we are not looping forever.
	 */
	for (count = 0; count < bl_mem_params_desc_num; count++) {

		desc_ptr = &bl_mem_params_desc_ptr[link_index];

		/* Make sure the image is executable */
		assert(EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE);

		/* Get the memory for current node */
		bl_current_exec_node = &desc_ptr->params_node_mem;

		/* Populate the image information */
		bl_current_exec_node->image_id = desc_ptr->image_id;
		bl_current_exec_node->image_info = &desc_ptr->image_info;
		bl_current_exec_node->ep_info = &desc_ptr->ep_info;

		if (bl_last_exec_node) {
			/* Assert if loop detected */
			assert(bl_last_exec_node->next_params_info == NULL);

			/* Link the previous node to the current one */
			bl_last_exec_node->next_params_info = bl_current_exec_node;
		}

		/* Update the last node */
		bl_last_exec_node = bl_current_exec_node;

		/* If no next hand-off image then break out */
		img_id = desc_ptr->next_handoff_image_id;
		if (img_id == INVALID_IMAGE_ID)
			break;

		/* Get the index for the next hand-off image */
		link_index = get_bl_params_node_index(img_id);
		assert((link_index > 0) &&
			(link_index < bl_mem_params_desc_num));
	}

	/* Invalid image is expected to terminate the loop */
	assert(img_id == INVALID_IMAGE_ID);

	/* Populate arg0 for the next BL image */
	next_bl_params.head->ep_info->args.arg0 = (unsigned long)&next_bl_params;

	/* Flush the parameters to be passed to the next BL image */
	flush_dcache_range((unsigned long)&next_bl_params, sizeof(next_bl_params));

	return &next_bl_params;
}
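Editor's note: to show how these helpers fit together, here is a hedged sketch of the kind of loop a BL2 built with LOAD_IMAGE_V2 could run over the loadable-image list. It is an illustration of the data structures above, not code from this commit; bl2_plat_handle_post_image_load() and plat_get_bl_image_load_info() are the platform hooks declared later in this patch.

/* Illustrative sketch only: walking the image list produced by
 * plat_get_bl_image_load_info() and loading each entry in turn. */
#include <assert.h>
#include <bl_common.h>
#include <platform.h>

static bl_params_t *bl2_load_images_sketch(void)
{
	bl_load_info_t *load_info = plat_get_bl_image_load_info();
	bl_load_info_node_t *node;
	int rc;

	assert(load_info != NULL);

	for (node = load_info->head; node != NULL; node = node->next_load_info) {
		/* Skip descriptors marked as "do not load" */
		if (node->image_info->h.attr & IMAGE_ATTRIB_SKIP_LOADING)
			continue;

		rc = load_auth_image(node->image_id, node->image_info);
		if (rc != 0)
			plat_error_handler(rc);

		/* Let the platform patch entry points, arguments, etc. */
		rc = bl2_plat_handle_post_image_load(node->image_id);
		if (rc != 0)
			plat_error_handler(rc);
	}

	/* Hand-off list for the next stage (e.g. BL31 or SP_MIN) */
	return plat_get_next_bl_params();
}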
docs/porting-guide.md

@@ -721,7 +721,6 @@ Firmware represents the power domain topology and how this relates to the
 linear CPU index, please refer [Power Domain Topology Design].

-
 2.4 Common optional modifications
 ---------------------------------
@@ -777,11 +776,15 @@ called in the following circumstances:
 The default implementation doesn't do anything, to avoid making assumptions
 about the way the platform displays its status information.

-This function receives the exception type as its argument. Possible values for
-exceptions types are listed in the [include/common/bl_common.h] header file.
-Note that these constants are not related to any architectural exception code;
-they are just an ARM Trusted Firmware convention.
+For AArch64, this function receives the exception type as its argument.
+Possible values for exceptions types are listed in the
+[include/common/bl_common.h] header file. Note that these constants are not
+related to any architectural exception code; they are just an ARM Trusted
+Firmware convention.
+
+For AArch32, this function receives the exception mode as its argument.
+Possible values for exception modes are listed in the
+[include/lib/aarch32/arch.h] header file.

 ### Function : plat_reset_handler()
@@ -841,10 +844,37 @@ and must be implemented in assembly because it may be called before the C
 environment is initialized.

 Note: The address from where it was called is stored in x30 (Link Register).
 The default implementation simply spins.

+### Function : plat_get_bl_image_load_info()
+
+    Argument : void
+    Return   : bl_load_info_t *
+
+This function returns pointer to the list of images that the platform has
+populated to load. This function is currently invoked in BL2 to load the
+BL3xx images, when LOAD_IMAGE_V2 is enabled.
+
+### Function : plat_get_next_bl_params()
+
+    Argument : void
+    Return   : bl_params_t *
+
+This function returns a pointer to the shared memory that the platform has
+kept aside to pass trusted firmware related information that next BL image
+needs. This function is currently invoked in BL2 to pass this information to
+the next BL image, when LOAD_IMAGE_V2 is enabled.
+
+### Function : plat_flush_next_bl_params()
+
+    Argument : void
+    Return   : void
+
+This function flushes to main memory all the image params that are passed to
+next image. This function is currently invoked in BL2 to flush this information
+to the next BL image, when LOAD_IMAGE_V2 is enabled.
+
 3.  Modifications specific to a Boot Loader stage
 -------------------------------------------------
@@ -1175,6 +1205,20 @@ populated with the extents of secure RAM available for BL2 to use. See
 `bl2_early_platform_setup()` above.

+Following function is required only when LOAD_IMAGE_V2 is enabled.
+
+### Function : bl2_plat_handle_post_image_load() [mandatory]
+
+    Argument : unsigned int
+    Return   : int
+
+This function can be used by the platforms to update/use image information
+for given `image_id`. This function is currently invoked in BL2 to handle
+BL image specific information based on the `image_id` passed, when
+LOAD_IMAGE_V2 is enabled.
+
+Following functions are required only when LOAD_IMAGE_V2 is disabled.
+
 ### Function : bl2_plat_get_scp_bl2_meminfo() [mandatory]

     Argument : meminfo *
@@ -2194,6 +2238,7 @@ _Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved._
 [plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S
 [plat/arm/board/fvp/fvp_pm.c]:             ../plat/arm/board/fvp/fvp_pm.c
 [include/common/bl_common.h]:              ../include/common/bl_common.h
+[include/lib/aarch32/arch.h]:              ../include/lib/aarch32/arch.h
 [include/plat/arm/common/arm_def.h]:       ../include/plat/arm/common/arm_def.h
 [include/plat/common/common_def.h]:        ../include/plat/common/common_def.h
 [include/plat/common/platform.h]:          ../include/plat/common/platform.h
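Editor's note: for platforms that reuse the generic descriptor helpers added in common/desc_image_load.c, the three new porting-guide functions can be thin wrappers. The sketch below is an illustration under that assumption, not code from this commit.

/* Hedged sketch: implementing the LOAD_IMAGE_V2 platform hooks by
 * delegating to the generic desc_image_load helpers from this patch. */
#include <desc_image_load.h>
#include <platform.h>

bl_load_info_t *plat_get_bl_image_load_info(void)
{
	/* List built from the platform's REGISTER_BL_IMAGE_DESCS() array */
	return get_bl_load_info_from_mem_params_desc();
}

bl_params_t *plat_get_next_bl_params(void)
{
	/* Hand-off structure consumed by the next BL stage via arg0 */
	return get_next_bl_params_from_mem_params_desc();
}

void plat_flush_next_bl_params(void)
{
	/* Make the descriptors visible to the next image with caches off */
	flush_bl_params_desc();
}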
docs/user-guide.md

@@ -430,6 +430,12 @@ performed.
     pages" section in [Firmware Design]. This flag is disabled by default and
     affects all BL images.

+*   `LOAD_IMAGE_V2`: Boolean option to enable support for new version (v2) of
+    image loading, which provides more flexibility and scalability around what
+    images are loaded and executed during boot. Default is 0.
+
+    Note: `TRUSTED_BOARD_BOOT` is currently not supported when `LOAD_IMAGE_V2`
+    is enabled.
+
 #### ARM development platform specific build options

 *   `ARM_TSP_RAM_LOCATION`: location of the TSP binary. Options:
include/bl32/sp_min/platform_sp_min.h

@@ -34,7 +34,8 @@
 /*******************************************************************************
  * Mandatory SP_MIN functions
  ******************************************************************************/
-void sp_min_early_platform_setup(void);
+void sp_min_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2);
 void sp_min_plat_arch_setup(void);
 void sp_min_platform_setup(void);
 entry_point_info_t *sp_min_plat_get_bl33_ep_info(void);
include/common/aarch32/asm_macros.S

@@ -69,6 +69,16 @@
 	lsl	\reg, \reg, \tmp
 	.endm

+	/*
+	 * Declare the exception vector table, enforcing it is aligned on a
+	 * 32 byte boundary.
+	 */
+	.macro vector_base  label
+	.section .vectors, "ax"
+	.align 5
+	\label:
+	.endm
+
 	/*
 	 * This macro calculates the base address of the current CPU's multi
 	 * processor (MP) stack using the plat_my_core_pos() index, the name of
include/common/aarch32/el3_common_macros.S (new file, mode 100644)

/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EL3_COMMON_MACROS_S__
#define __EL3_COMMON_MACROS_S__

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common _exception_vectors
	/* ---------------------------------------------------------------------
	 * Enable the instruction cache and alignment checks
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_RES1 | SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* -----------------------------------------------------
	 * Enable the SIF bit to disable instruction fetches
	 * from Non-secure memory.
	 * -----------------------------------------------------
	 */
	ldcopr	r0, SCR
	orr	r0, r0, #SCR_SIF_BIT
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable the Asynchronous data abort now that the
	 * exception vectors have been setup.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* Enable access to Advanced SIMD registers */
	ldcopr	r0, NSACR
	bic	r0, r0, #NSASEDIS_BIT
	bic	r0, r0, #NSTRCDIS_BIT
	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
	stcopr	r0, NSACR
	isb

	/*
	 * Enable access to Advanced SIMD, Floating point and to the Trace
	 * functionality as well.
	 */
	ldcopr	r0, CPACR
	bic	r0, r0, #ASEDIS_BIT
	bic	r0, r0, #TRCDIS_BIT
	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
	stcopr	r0, CPACR
	isb

	vmrs	r0, FPEXC
	orr	r0, r0, #FPEXC_EN_BIT
	vmsr	FPEXC, r0
	isb
	.endm

/* -----------------------------------------------------------------------------
 * This is the super set of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows to enable/disable
 * some actions.
 *
 *  _set_endian:
 *	Whether the macro needs to configure the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps on the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	/* Make sure we are in Secure Mode */
#if ASM_ASSERTION
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_set_endian
		/* -------------------------------------------------------------
		 * Set the CPU endianness before doing anything that might
		 * involve memory reads or writes.
		 * -------------------------------------------------------------
		 */
		ldcopr	r0, SCTLR
		bic	r0, r0, #SCTLR_EE_BIT
		stcopr	r0, SCTLR
		isb
	.endif /* _set_endian */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address and if it is not zero
		 * then it means it is a warm boot so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common \_exception_vectors

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup

		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if IMAGE_BL32
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the BL32 (SP_MIN) image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -----------------------------------------------------------------
		 */
		ldr	r0, =__RW_START__
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif /* IMAGE_BL32 */

		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_SIZE__
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
		bl	zeromem
#endif

#if IMAGE_BL1
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_SIZE__
		bl	memcpy
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack
	.endm

#endif /* __EL3_COMMON_MACROS_S__ */
include/common/bl_common.h

@@ -93,11 +93,22 @@
 #define EP_GET_EXE(x) (x & EP_EXE_MASK)
 #define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee))

+#define EP_FIRST_EXE_MASK	0x10
+#define EP_FIRST_EXE		0x10
+#define EP_GET_FIRST_EXE(x)	((x) & EP_FIRST_EXE_MASK)
+#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee))
+
 #define PARAM_EP		0x01
 #define PARAM_IMAGE_BINARY	0x02
 #define PARAM_BL31		0x03
+#define PARAM_BL_LOAD_INFO	0x04
+#define PARAM_BL_PARAMS		0x05
+
+#define IMAGE_ATTRIB_SKIP_LOADING	0x02
+#define IMAGE_ATTRIB_PLAT_SETUP		0x04

 #define VERSION_1	0x01
+#define VERSION_2	0x02

 #define INVALID_IMAGE_ID	(0xFFFFFFFF)
@@ -181,8 +192,10 @@ extern uintptr_t __COHERENT_RAM_END__;
 typedef struct meminfo {
 	uintptr_t total_base;
 	size_t total_size;
+#if !LOAD_IMAGE_V2
 	uintptr_t free_base;
 	size_t free_size;
+#endif
 } meminfo_t;

 typedef struct aapcs64_params {
@@ -245,6 +258,9 @@ typedef struct image_info {
 	param_header_t h;
 	uintptr_t image_base;   /* physical address of base of image */
 	uint32_t image_size;    /* bytes read from image file */
+#if LOAD_IMAGE_V2
+	uint32_t image_max_size;
+#endif
 } image_info_t;

 /*****************************************************************************
@@ -263,6 +279,39 @@ typedef struct image_desc {
 	entry_point_info_t ep_info;
 } image_desc_t;

+#if LOAD_IMAGE_V2
+/* BL image node in the BL image loading sequence */
+typedef struct bl_load_info_node {
+	unsigned int image_id;
+	image_info_t *image_info;
+	struct bl_load_info_node *next_load_info;
+} bl_load_info_node_t;
+
+/* BL image head node in the BL image loading sequence */
+typedef struct bl_load_info {
+	param_header_t h;
+	bl_load_info_node_t *head;
+} bl_load_info_t;
+
+/* BL image node in the BL image execution sequence */
+typedef struct bl_params_node {
+	unsigned int image_id;
+	image_info_t *image_info;
+	entry_point_info_t *ep_info;
+	struct bl_params_node *next_params_info;
+} bl_params_node_t;
+
+/*
+ * BL image head node in the BL image execution sequence
+ * It is also used to pass information to next BL image.
+ */
+typedef struct bl_params {
+	param_header_t h;
+	bl_params_node_t *head;
+} bl_params_t;
+
+#else /* LOAD_IMAGE_V2 */
+
 /*******************************************************************************
  * This structure represents the superset of information that can be passed to
  * BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
@@ -286,6 +335,7 @@ typedef struct bl31_params {
 	image_info_t *bl33_image_info;
 } bl31_params_t;

+#endif /* LOAD_IMAGE_V2 */

 /*
  * Compile time assertions related to the 'entry_point_info' structure to
@@ -308,24 +358,34 @@ CASSERT(sizeof(uintptr_t) ==
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
-uintptr_t page_align(uintptr_t, unsigned);
 size_t image_size(unsigned int image_id);
+
+#if LOAD_IMAGE_V2
+
+int load_image(unsigned int image_id, image_info_t *image_data);
+int load_auth_image(unsigned int image_id, image_info_t *image_data);
+
+#else /* LOAD_IMAGE_V2 */
+
+uintptr_t page_align(uintptr_t, unsigned);
 int load_image(meminfo_t *mem_layout,
 	       unsigned int image_id,
 	       uintptr_t image_base,
 	       image_info_t *image_data,
 	       entry_point_info_t *entry_point_info);
 int load_auth_image(meminfo_t *mem_layout,
-		    unsigned int image_name,
+		    unsigned int image_id,
 		    uintptr_t image_base,
 		    image_info_t *image_data,
 		    entry_point_info_t *entry_point_info);
-extern const char build_message[];
-extern const char version_string[];
-
 void reserve_mem(uintptr_t *free_base, size_t *free_size,
 		uintptr_t addr, size_t size);

+#endif /* LOAD_IMAGE_V2 */
+
+extern const char build_message[];
+extern const char version_string[];
+
 void print_entry_point_info(const entry_point_info_t *ep_info);

 #endif /*__ASSEMBLY__*/
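Editor's note: the bl_params list added here is what BL2 hands to the next stage through arg0. A hedged sketch of how a receiving image could walk it (assuming the pointer arrives as the first entrypoint argument) follows; it is illustrative only.

/* Hedged sketch: consuming the bl_params_t handed over in arg0 by BL2
 * when LOAD_IMAGE_V2 is enabled. */
#include <assert.h>
#include <bl_common.h>
#include <stddef.h>

static entry_point_info_t *find_ep_info(bl_params_t *params,
					unsigned int image_id)
{
	bl_params_node_t *node;

	assert(params != NULL);
	assert(params->h.type == PARAM_BL_PARAMS);
	assert(params->h.version >= VERSION_2);

	/* Walk the execution list until the requested image is found */
	for (node = params->head; node != NULL; node = node->next_params_info) {
		if (node->image_id == image_id)
			return node->ep_info;
	}

	return NULL;
}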
include/common/desc_image_load.h (new file, mode 100644)
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DESC_IMAGE_LOAD_H__
#define __DESC_IMAGE_LOAD_H__
#include <bl_common.h>
#if LOAD_IMAGE_V2
/* Following structure is used to store BL ep/image info. */
typedef struct bl_mem_params_node {
	unsigned int image_id;
	image_info_t image_info;
	entry_point_info_t ep_info;
	unsigned int next_handoff_image_id;
	bl_load_info_node_t load_node_mem;
	bl_params_node_t params_node_mem;
} bl_mem_params_node_t;

/*
 * Macro to register list of BL image descriptors,
 * defined as an array of bl_mem_params_node_t.
 */
#define REGISTER_BL_IMAGE_DESCS(_img_desc) \
	bl_mem_params_node_t *bl_mem_params_desc_ptr = &_img_desc[0];	\
	unsigned int bl_mem_params_desc_num = ARRAY_SIZE(_img_desc);

/* BL image loading utility functions */
void flush_bl_params_desc(void);
int get_bl_params_node_index(unsigned int image_id);
bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id);
bl_load_info_t *get_bl_load_info_from_mem_params_desc(void);
bl_params_t *get_next_bl_params_from_mem_params_desc(void);

#endif /* LOAD_IMAGE_V2 */
#endif /* __DESC_IMAGE_LOAD_H__ */
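Editor's note: a hedged example of how a platform might describe its boot flow with this structure follows. The image IDs are the standard ones from bl_common.h, but the BL32_BASE/BL32_LIMIT/PLAT_NS_IMAGE_OFFSET style constants are placeholders assumed to come from the platform's own headers, not definitions made by this patch.

/* Illustrative sketch: a two-image boot flow (BL32/SP_MIN then BL33)
 * described with bl_mem_params_node_t descriptors. */
#include <bl_common.h>
#include <desc_image_load.h>

static bl_mem_params_node_t bl2_mem_params_descs[] = {
	{
		.image_id = BL32_IMAGE_ID,
		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
			entry_point_info_t, SECURE | EXECUTABLE | EP_FIRST_EXE),
		.ep_info.pc = BL32_BASE,			/* placeholder */
		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
			image_info_t, 0),
		.image_info.image_base = BL32_BASE,		/* placeholder */
		.image_info.image_max_size = BL32_LIMIT - BL32_BASE,
		.next_handoff_image_id = BL33_IMAGE_ID,
	},
	{
		.image_id = BL33_IMAGE_ID,
		SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
			entry_point_info_t, NON_SECURE | EXECUTABLE),
		.ep_info.pc = PLAT_NS_IMAGE_OFFSET,		/* placeholder */
		SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
			image_info_t, 0),
		.image_info.image_base = PLAT_NS_IMAGE_OFFSET,	/* placeholder */
		.image_info.image_max_size = 0x10000000,	/* placeholder */
		.next_handoff_image_id = INVALID_IMAGE_ID,	/* end of chain */
	},
};

REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)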
include/lib/aarch32/arch.h

@@ -191,6 +191,7 @@
 /* NASCR definitions */
 #define NSASEDIS_BIT		(1 << 15)
+#define NSTRCDIS_BIT		(1 << 20)
 #define NASCR_CP11_BIT		(1 << 11)
 #define NASCR_CP10_BIT		(1 << 10)
include/lib/aarch32/arch_helpers.h

@@ -187,6 +187,9 @@ void flush_dcache_range(uintptr_t addr, size_t size);
 void clean_dcache_range(uintptr_t addr, size_t size);
 void inv_dcache_range(uintptr_t addr, size_t size);

+void disable_mmu_secure(void);
+void disable_mmu_icache_secure(void);
+
 DEFINE_SYSOP_FUNC(wfi)
 DEFINE_SYSOP_FUNC(wfe)
 DEFINE_SYSOP_FUNC(sev)
@@ -196,6 +199,9 @@ DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
 DEFINE_SYSOP_FUNC(isb)

+void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
+		 uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
+
 DEFINE_SYSREG_RW_FUNCS(spsr)
 DEFINE_SYSREG_RW_FUNCS(cpsr)
@@ -289,4 +295,6 @@ DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
 #define read_cntpct_el0()	read64_cntpct()

+#define read_ctr_el0()		read_ctr()
+
 #endif /* __ARCH_HELPERS_H__ */
include/lib/cpus/aarch32/cortex_a32.h (new file, mode 100644)
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CORTEX_A32_H__
#define __CORTEX_A32_H__
/* Cortex-A32 Main ID register for revision 0 */
#define CORTEX_A32_MIDR 0x410FD010
/*******************************************************************************
* CPU Extended Control register specific definitions.
* CPUECTLR_EL1 is an implementation-specific register.
******************************************************************************/
#define CORTEX_A32_CPUECTLR_EL1 p15, 1, c15
#define CORTEX_A32_CPUECTLR_SMPEN_BIT (1 << 6)
#endif /* __CORTEX_A32_H__ */
include/lib/cpus/aarch32/cpu_macros.S

@@ -42,12 +42,16 @@
 CPU_MIDR: /* cpu_ops midr */
 	.space  4
 /* Reset fn is needed during reset */
+#if IMAGE_BL1 || IMAGE_BL32
 CPU_RESET_FUNC: /* cpu_ops reset_func */
 	.space  4
+#endif
+#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
 CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
 	.space  4
 CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
 	.space  4
+#endif
 CPU_OPS_SIZE = .

 /*
@@ -60,13 +64,17 @@ CPU_OPS_SIZE = .
 	.align 2
 	.type cpu_ops_\_name, %object
 	.word \_midr
+#if IMAGE_BL1 || IMAGE_BL32
 	.if \_noresetfunc
 	.word 0
 	.else
 	.word \_name\()_reset_func
 	.endif
+#endif
+#if IMAGE_BL32
 	.word \_name\()_core_pwr_dwn
 	.word \_name\()_cluster_pwr_dwn
+#endif
 	.endm

#endif /* __CPU_MACROS_S__ */
include/lib/el3_runtime/context_mgmt.h

@@ -103,5 +103,9 @@ static inline void cm_set_next_context(void *context)
 		      "msr spsel, #0\n"
 		      : : "r" (context));
 }
+#else
+void *cm_get_next_context(void);
+
 #endif /* AARCH32 */
+
 #endif /* __CM_H__ */
include/plat/arm/common/plat_arm.h

@@ -42,6 +42,7 @@
 ******************************************************************************/
 struct bl31_params;
 struct meminfo;
+struct image_info;

 #define ARM_CASSERT_MMAP						\
 	CASSERT((ARRAY_SIZE(plat_arm_mmap) + ARM_BL_REGIONS)		\
@@ -164,8 +165,13 @@ void arm_bl2u_platform_setup(void);
 void arm_bl2u_plat_arch_setup(void);

 /* BL31 utility functions */
+#if LOAD_IMAGE_V2
+void arm_bl31_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2);
+#else
 void arm_bl31_early_platform_setup(struct bl31_params *from_bl2,
 				void *plat_params_from_bl2);
+#endif /* LOAD_IMAGE_V2 */
 void arm_bl31_platform_setup(void);
 void arm_bl31_plat_runtime_setup(void);
 void arm_bl31_plat_arch_setup(void);
@@ -174,7 +180,8 @@ void arm_bl31_plat_arch_setup(void);
 void arm_tsp_early_platform_setup(void);

 /* SP_MIN utility functions */
-void arm_sp_min_early_platform_setup(void);
+void arm_sp_min_early_platform_setup(void *from_bl2,
+				void *plat_params_from_bl2);

 /* FIP TOC validity check */
 int arm_io_is_toc_valid(void);
@@ -194,6 +201,14 @@ void plat_arm_interconnect_init(void);
 void plat_arm_interconnect_enter_coherency(void);
 void plat_arm_interconnect_exit_coherency(void);

+#if LOAD_IMAGE_V2
+/*
+ * This function is called after loading SCP_BL2 image and it is used to perform
+ * any platform-specific actions required to handle the SCP firmware.
+ */
+int plat_arm_bl2_handle_scp_bl2(struct image_info *scp_bl2_image_info);
+#endif
+
 /*
  * Optional functions required in ARM standard platforms
  */
include/plat/common/common_def.h

@@ -41,9 +41,13 @@
 /*
  * Platform binary types for linking
  */
+#ifdef AARCH32
+#define PLATFORM_LINKER_FORMAT          "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH            arm
+#else
 #define PLATFORM_LINKER_FORMAT          "elf64-littleaarch64"
 #define PLATFORM_LINKER_ARCH            aarch64
+#endif /* AARCH32 */

 /*
  * Generic platform constants
@@ -70,6 +74,18 @@
 #define MAKE_ULL(x)			x
 #endif

+#if LOAD_IMAGE_V2
+#define BL2_IMAGE_DESC {				\
+	.image_id = BL2_IMAGE_ID,			\
+	SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,	\
+		VERSION_2, image_info_t, 0),		\
+	.image_info.image_base = BL2_BASE,		\
+	.image_info.image_max_size = BL2_LIMIT - BL2_BASE,\
+	SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,	\
+		VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),\
+	.ep_info.pc = BL2_BASE,				\
+}
+#else /* LOAD_IMAGE_V2 */
 #define BL2_IMAGE_DESC {				\
 	.image_id = BL2_IMAGE_ID,			\
 	SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,	\
@@ -79,6 +95,7 @@
 		VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),\
 	.ep_info.pc = BL2_BASE,				\
 }
+#endif /* LOAD_IMAGE_V2 */

 /*
  * The following constants identify the extents of the code & read-only data
include/plat/common/platform.h
View file @
44abeaa6
...
@@ -44,6 +44,8 @@ struct image_info;
...
@@ -44,6 +44,8 @@ struct image_info;
struct
entry_point_info
;
struct
entry_point_info
;
struct
bl31_params
;
struct
bl31_params
;
struct
image_desc
;
struct
image_desc
;
struct
bl_load_info
;
struct
bl_params
;
/*******************************************************************************
/*******************************************************************************
* plat_get_rotpk_info() flags
* plat_get_rotpk_info() flags
...
@@ -84,7 +86,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
...
@@ -84,7 +86,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
* Optional common functions (may be overridden)
* Optional common functions (may be overridden)
******************************************************************************/
******************************************************************************/
uintptr_t
plat_get_my_stack
(
void
);
uintptr_t
plat_get_my_stack
(
void
);
void
plat_report_exception
(
unsigned
long
);
void
plat_report_exception
(
unsigned
int
exception_type
);
int
plat_crash_console_init
(
void
);
int
plat_crash_console_init
(
void
);
int
plat_crash_console_putc
(
int
c
);
int
plat_crash_console_putc
(
int
c
);
void
plat_error_handler
(
int
err
)
__dead2
;
void
plat_error_handler
(
int
err
)
__dead2
;
...
@@ -138,6 +140,15 @@ void bl2_plat_arch_setup(void);
...
@@ -138,6 +140,15 @@ void bl2_plat_arch_setup(void);
void
bl2_platform_setup
(
void
);
void
bl2_platform_setup
(
void
);
struct
meminfo
*
bl2_plat_sec_mem_layout
(
void
);
struct
meminfo
*
bl2_plat_sec_mem_layout
(
void
);
#if LOAD_IMAGE_V2
/*
* This function can be used by the platforms to update/use image
* information for given `image_id`.
*/
int
bl2_plat_handle_post_image_load
(
unsigned
int
image_id
);
#else
/* LOAD_IMAGE_V2 */
/*
/*
* This function returns a pointer to the shared memory that the platform has
* This function returns a pointer to the shared memory that the platform has
* kept aside to pass trusted firmware related information that BL31
* kept aside to pass trusted firmware related information that BL31
...
@@ -194,6 +205,8 @@ void bl2_plat_set_bl32_ep_info(struct image_info *image,
/* Gets the memory layout for BL32 */
void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info);
#endif /* LOAD_IMAGE_V2 */

/*******************************************************************************
 * Optional BL2 functions (may be overridden)
 ******************************************************************************/
...
@@ -218,8 +231,13 @@ int bl2u_plat_handle_scp_bl2u(void);
/*******************************************************************************
 * Mandatory BL31 functions
 ******************************************************************************/
#if LOAD_IMAGE_V2
void bl31_early_platform_setup(void *from_bl2,
				void *plat_params_from_bl2);
#else
void bl31_early_platform_setup(struct bl31_params *from_bl2,
				void *plat_params_from_bl2);
#endif
void bl31_plat_arch_setup(void);
void bl31_platform_setup(void);
void bl31_plat_runtime_setup(void);
...
@@ -257,6 +275,31 @@ int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr);
int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr);

#if LOAD_IMAGE_V2
/*******************************************************************************
 * Mandatory BL image load functions (may be overridden).
 ******************************************************************************/
/*
 * This function returns pointer to the list of images that the
 * platform has populated to load.
 */
struct bl_load_info *plat_get_bl_image_load_info(void);

/*
 * This function returns a pointer to the shared memory that the
 * platform has kept aside to pass trusted firmware related
 * information that next BL image could need.
 */
struct bl_params *plat_get_next_bl_params(void);

/*
 * This function flushes to main memory all the params that are
 * passed to next image.
 */
void plat_flush_next_bl_params(void);
#endif /* LOAD_IMAGE_V2 */
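Taken together, these three hooks define the LOAD_IMAGE_V2 hand-off: BL2 asks the platform which images to load, then which parameter list to pass on, and finally flushes that list before handing control to the next image. A hedged sketch of a platform providing them from statically allocated objects; the variable names and the flushed extent are illustrative, and flush_dcache_range() is the usual TF cache helper:

/* Assumes the full bl_load_info / bl_params definitions from the
 * LOAD_IMAGE_V2 headers are in scope; the objects below are hypothetical
 * platform data populated elsewhere during boot. */
static struct bl_load_info bl2_load_info;
static struct bl_params bl2_next_params;

struct bl_load_info *plat_get_bl_image_load_info(void)
{
	return &bl2_load_info;
}

struct bl_params *plat_get_next_bl_params(void)
{
	return &bl2_next_params;
}

void plat_flush_next_bl_params(void)
{
	flush_dcache_range((uintptr_t)&bl2_next_params,
			   sizeof(bl2_next_params));
}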
#if ENABLE_PLAT_COMPAT
/*
 * The below declarations are to enable compatibility for the platform ports
...
lib/aarch32/misc_helpers.S
View file @
44abeaa6
...
@@ -32,7 +32,21 @@
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * For AArch32 only r0-r3 will be in the registers;
	 * rest r4-r6 will be pushed on to the stack. So here, we'll
	 * have to load them from the stack to registers r4-r6 explicitly.
	 * Clobbers: r4-r6
	 */
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc
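The comment documents the AArch32 procedure-call convention at work here: the first four C arguments arrive in r0-r3 and any further arguments land on the caller's stack, which is why the helper reloads r4-r6 before issuing SMC #0. A hedged sketch of a caller, assuming a seven-argument prototype consistent with that comment (check the AArch32 headers for the real declaration):

#include <stdint.h>

/* Assumed prototype for the assembly helper above (hedged). */
void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
	 uint32_t r4, uint32_t r5, uint32_t r6);

/* Per AAPCS32 the first four arguments travel in r0-r3; the remaining three
 * are pushed on the stack, which is what "ldm sp, {r4, r5, r6}" recovers
 * before the SMC instruction. SMC_FID here is a placeholder function ID. */
static inline void raise_smc(uint32_t smc_fid)
{
	smc(smc_fid, 0, 0, 0, 0, 0, 0);
}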
/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
...
@@ -58,3 +72,25 @@ z_loop:
z_end:
	bx	lr
endfunc zeromem

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */
func disable_mmu_secure
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure

func disable_mmu_icache_secure
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure
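Both entry points share the do_disable_mmu tail: disable_mmu_secure clears the MMU and data-cache enables, while disable_mmu_icache_secure loads a wider mask that also clears the I-cache enable before branching to the same read-modify-write of SCTLR. A C-level illustration of that pattern, as a hedged sketch; read_sctlr()/write_sctlr() stand in for the ldcopr/stcopr accessors:

#include <stdint.h>

#define SCTLR_M_BIT	(1u << 0)	/* MMU enable */
#define SCTLR_C_BIT	(1u << 2)	/* Data cache enable */
#define SCTLR_I_BIT	(1u << 12)	/* Instruction cache enable */

/* Stand-ins for the coprocessor accessors used by the assembly. */
extern uint32_t read_sctlr(void);
extern void write_sctlr(uint32_t val);

static void do_disable_mmu(uint32_t clear_mask)
{
	uint32_t sctlr = read_sctlr();

	write_sctlr(sctlr & ~clear_mask);
	/* The assembly follows this with ISB and DSB SY so the change
	 * takes effect before returning. */
}

void disable_mmu_secure(void)
{
	do_disable_mmu(SCTLR_M_BIT | SCTLR_C_BIT);
}

void disable_mmu_icache_secure(void)
{
	do_disable_mmu(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
}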
lib/cpus/aarch32/cortex_a32.S
0 → 100644
View file @
44abeaa6
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a32.h>
#include <cpu_macros.S>
	/* ---------------------------------------------
	 * Disable intra-cluster coherency
	 * Clobbers: r0-r1
	 * ---------------------------------------------
	 */
func cortex_a32_disable_smp
	ldcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
	bic	r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
	stcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
	isb
	dsb	sy
	bx	lr
endfunc cortex_a32_disable_smp
	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A32.
	 * Clobbers: r0-r1
	 * -------------------------------------------------
	 */
func cortex_a32_reset_func
	/* ---------------------------------------------
	 * Enable the SMP bit.
	 * ---------------------------------------------
	 */
	ldcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
	orr	r0, r0, #CORTEX_A32_CPUECTLR_SMPEN_BIT
	stcopr16	r0, r1, CORTEX_A32_CPUECTLR_EL1
	isb
	bx	lr
endfunc cortex_a32_reset_func
	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Cortex-A32.
	 * Clobbers: r0-r3
	 * ----------------------------------------------------
	 */
func cortex_a32_core_pwr_dwn
	push	{lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Flush L1 caches.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	pop	{lr}
	b	cortex_a32_disable_smp
endfunc cortex_a32_core_pwr_dwn
	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Cortex-A32.
	 * Clobbers: r0-r3
	 * -------------------------------------------------------
	 */
func cortex_a32_cluster_pwr_dwn
	push	{lr}

	/* Assert if cache is enabled */
#if ASM_ASSERTION
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif

	/* ---------------------------------------------
	 * Flush L1 cache.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level1

	/* ---------------------------------------------
	 * Disable the optional ACP.
	 * ---------------------------------------------
	 */
	bl	plat_disable_acp

	/* ---------------------------------------------
	 * Flush L2 cache.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	bl	dcsw_op_level2

	/* ---------------------------------------------
	 * Come out of intra cluster coherency
	 * ---------------------------------------------
	 */
	pop	{lr}
	b	cortex_a32_disable_smp
endfunc cortex_a32_cluster_pwr_dwn
declare_cpu_ops cortex_a32, CORTEX_A32_MIDR