Commit 0f22bef3 authored by Scott Branden, committed by GitHub

Merge branch 'integration' into tf_issue_461

Showing with 681 additions and 43 deletions
......@@ -34,30 +34,27 @@
* @(#)assert.h 8.2 (Berkeley) 1/21/94
* $FreeBSD$
*/
#include <sys/cdefs.h>
/*
* Unlike other ANSI header files, <assert.h> may usefully be included
* multiple times, with and without NDEBUG defined.
* Portions copyright (c) 2017, ARM Limited and Contributors.
* All rights reserved.
*/
#undef assert
#undef _assert
#ifndef _ASSERT_H_
#define _ASSERT_H_
#ifdef NDEBUG
#define assert(e) ((void)0)
#define _assert(e) ((void)0)
#else
#define _assert(e) assert(e)
#include <sys/cdefs.h>
#if ENABLE_ASSERTIONS
#define _assert(e) assert(e)
#define assert(e) ((e) ? (void)0 : __assert(__func__, __FILE__, \
__LINE__, #e))
#endif /* NDEBUG */
#else
#define assert(e) ((void)0)
#define _assert(e) ((void)0)
#endif /* ENABLE_ASSERTIONS */
#ifndef _ASSERT_H_
#define _ASSERT_H_
__BEGIN_DECLS
void __assert(const char *, const char *, int, const char *) __dead2;
__END_DECLS
#endif /* !_ASSERT_H_ */
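The assert.h hunk above replaces the NDEBUG convention with the TF-A build flag ENABLE_ASSERTIONS. A minimal usage sketch, not part of the diff; it assumes the build system passes -DENABLE_ASSERTIONS=0 or =1 on the compiler command line:

#include <assert.h>

/* Illustrative only: a helper guarded by the library assert. */
static int safe_div(int num, int den)
{
	/* With ENABLE_ASSERTIONS=1 this expands to the __assert() reporting
	 * path shown in the hunk; with ENABLE_ASSERTIONS=0 it becomes
	 * (void)0 and the check disappears from the binary. */
	assert(den != 0);
	return num / den;
}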
/*
* Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __bool_true_false_are_defined
#define __bool_true_false_are_defined 1
#ifndef __cplusplus
#define false 0
#define true 1
#define bool _Bool
#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER)
typedef int _Bool;
#endif
#endif /* !__cplusplus */
#endif /* __bool_true_false_are_defined */
......@@ -108,7 +108,7 @@ typedef struct mmap_region {
/* Generic translation table APIs */
void init_xlat_tables(void);
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
size_t size, mmap_attr_t attr);
void mmap_add(const mmap_region_t *mm);
#endif /*__ASSEMBLY__*/
......
......@@ -114,7 +114,7 @@ void init_xlat_tables(void);
* be added before initializing the MMU and cannot be removed later.
*/
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
size_t size, mmap_attr_t attr);
/*
* Add a region with defined base PA and base VA. This type of region can be
......@@ -128,7 +128,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
* EPERM: It overlaps another region in an invalid way.
*/
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
size_t size, mmap_attr_t attr);
/*
* Add an array of static regions with defined base PA and base VA. This type
......
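These hunks change the attr parameter of the mmap APIs from unsigned int to the mmap_attr_t type. A hedged sketch of a caller follows (not part of the diff); the MT_* constants are the usual xlat-tables attribute flags, while the peripheral base and size are made up for illustration:

#include <xlat_tables_v2.h>

#define SKETCH_UART_BASE	0x1c090000ULL	/* hypothetical peripheral base */
#define SKETCH_UART_SIZE	0x1000		/* one 4 KB page */

static void sketch_map_uart(void)
{
	/* attr is now an mmap_attr_t value rather than a bare unsigned int */
	mmap_add_region(SKETCH_UART_BASE, SKETCH_UART_BASE, SKETCH_UART_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE);
}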
......@@ -30,7 +30,7 @@
#ifndef __V2M_DEF_H__
#define __V2M_DEF_H__
#include <xlat_tables_v2.h>
#include <arm_xlat_tables.h>
/* V2M motherboard system registers & offsets */
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#if ARM_XLAT_TABLES_LIB_V1
#include <xlat_tables.h>
#else
#include <xlat_tables_v2.h>
#endif /* ARM_XLAT_TABLES_LIB_V1 */
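The new arm_xlat_tables.h wrapper lets ARM platform code include one header and get either translation-table library, selected by the ARM_XLAT_TABLES_LIB_V1 build option. A sketch of the intended call site (illustrative, not part of the diff; the option is assumed to come from the platform makefile):

/* Platform file: include the wrapper instead of naming a library version. */
#include <arm_xlat_tables.h>

void sketch_plat_setup_tables(void)
{
	/* init_xlat_tables() resolves to the v1 implementation when the
	 * platform builds with ARM_XLAT_TABLES_LIB_V1=1, and to the v2
	 * implementation otherwise. */
	init_xlat_tables();
}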
......@@ -30,6 +30,7 @@
#ifndef __PLAT_ARM_H__
#define __PLAT_ARM_H__
#include <arm_xlat_tables.h>
#include <bakery_lock.h>
#include <cassert.h>
#include <cpu_data.h>
......@@ -80,7 +81,7 @@ void arm_setup_page_tables(uintptr_t total_base,
#else
/*
* Empty macros for all other BL stages other than BL31
* Empty macros for all other BL stages other than BL31 and BL32
*/
#define ARM_INSTANTIATE_LOCK
#define arm_lock_init()
......@@ -156,6 +157,7 @@ void arm_bl2_platform_setup(void);
void arm_bl2_plat_arch_setup(void);
uint32_t arm_get_spsr_for_bl32_entry(void);
uint32_t arm_get_spsr_for_bl33_entry(void);
int arm_bl2_handle_post_image_load(unsigned int image_id);
/* BL2U utility functions */
void arm_bl2u_early_platform_setup(struct meminfo *mem_layout,
......
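arm_bl2_handle_post_image_load() is a new common helper declared in this hunk. A hedged sketch of how a platform port might forward its BL2 post-image-load hook to it (illustrative, not part of the diff; the outer hook name follows the usual TF-A convention and is an assumption here):

#include <plat_arm.h>

/* Illustrative forwarding stub; returns 0 on success or a negative
 * error code, as the generic BL2 image-load flow expects. */
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return arm_bl2_handle_post_image_load(image_id);
}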
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -35,11 +35,15 @@
#include <psci.h>
#include <types.h>
/* System power domain at level 2, as currently implemented by CSS platforms */
#define CSS_SYSTEM_PWR_DMN_LVL ARM_PWR_LVL2
/* Macros to read the CSS power domain state */
#define CSS_CORE_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL0]
#define CSS_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL1]
#define CSS_SYSTEM_PWR_STATE(state) ((PLAT_MAX_PWR_LVL > ARM_PWR_LVL1) ?\
(state)->pwr_domain_state[ARM_PWR_LVL2] : 0)
#define CSS_SYSTEM_PWR_STATE(state) \
((PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) ?\
(state)->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] : 0)
int css_pwr_domain_on(u_register_t mpidr);
void css_pwr_domain_on_finish(const psci_power_state_t *target_state);
......
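The reworked CSS_SYSTEM_PWR_STATE() now keys off the new CSS_SYSTEM_PWR_DMN_LVL definition instead of comparing against ARM_PWR_LVL1. A short sketch of how a CSS power-management routine might read it (illustrative, not part of the diff; ARM_LOCAL_STATE_OFF is the usual local-state constant and is assumed here):

#include <css_pm.h>
#include <psci.h>

/* Illustrative check: treat the request as a full system power down only
 * when the platform actually exposes the level-2 system power domain;
 * otherwise the macro reports 0 (run state). */
static int sketch_is_system_off(const psci_power_state_t *target_state)
{
	return CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF;
}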
......@@ -96,9 +96,16 @@
/*
* Required platform porting definitions common to all ARM CSS SoCs
*/
#if JUNO_AARCH32_EL3_RUNTIME
/*
* Following change is required to initialize TZC
* for enabling access to the HI_VECTOR (0xFFFF0000)
* location needed for JUNO AARCH32 support.
*/
#define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x8000)
#else
/* 2MB used for SCP DDR retraining */
#define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x00200000)
#endif
#endif /* __SOC_CSS_DEF_H__ */
......@@ -100,6 +100,7 @@ uintptr_t plat_get_my_stack(void);
void plat_report_exception(unsigned int exception_type);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
int plat_crash_console_flush(void);
void plat_error_handler(int err) __dead2;
void plat_panic_handler(void) __dead2;
......
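plat_crash_console_flush() is a new crash-console hook added to platform.h; platforms usually implement it in assembly next to plat_crash_console_putc(). A C-level sketch of the contract (illustrative, not part of the diff; console_core_flush(), its signature, and the UART base constant are assumptions):

#include <stdint.h>

#define SKETCH_CRASH_UART_BASE	0x1c090000	/* hypothetical crash UART base */

/* Assumed console driver helper; signature is an assumption here. */
extern int console_core_flush(uintptr_t base_addr);

/* Illustrative stub: push out any characters still buffered in the
 * crash-console UART before the CPU goes down. Returns 0 on success. */
int plat_crash_console_flush(void)
{
	return console_core_flush(SKETCH_CRASH_UART_BASE);
}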
......@@ -162,7 +162,7 @@ endfunc zeromem
* --------------------------------------------------------------------------
*/
func memcpy4
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
orr r3, r0, r1
tst r3, #0x3
ASM_ASSERT(eq)
......
......@@ -215,7 +215,7 @@ func zeromem_dczva
tmp1 .req x4
tmp2 .req x5
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
/*
* Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
* register value and panic if the MMU is disabled.
......@@ -228,7 +228,7 @@ func zeromem_dczva
tst tmp1, #SCTLR_M_BIT
ASM_ASSERT(ne)
#endif /* ASM_ASSERTION */
#endif /* ENABLE_ASSERTIONS */
/* stop_address is the address past the last to zero */
add stop_address, cursor, length
......@@ -247,7 +247,7 @@ func zeromem_dczva
mov tmp2, #(1 << 2)
lsl block_size, tmp2, block_size
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
/*
* Assumes block size is at least 16 bytes to avoid manual realignment
* of the cursor at the end of the DCZVA loop.
......@@ -444,7 +444,7 @@ endfunc zeromem_dczva
* --------------------------------------------------------------------------
*/
func memcpy16
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
orr x3, x0, x1
tst x3, #0xf
ASM_ASSERT(eq)
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -35,7 +35,7 @@
func aem_generic_core_pwr_dwn
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
......@@ -51,7 +51,7 @@ endfunc aem_generic_core_pwr_dwn
func aem_generic_cluster_pwr_dwn
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -76,7 +76,7 @@ func cortex_a32_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
......@@ -107,7 +107,7 @@ func cortex_a32_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a53.h>
#include <cpu_macros.S>
#include <debug.h>
/* ---------------------------------------------
* Disable intra-cluster coherency
* ---------------------------------------------
*/
func cortex_a53_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
dsb sy
bx lr
endfunc cortex_a53_disable_smp
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A53.
* -------------------------------------------------
*/
func cortex_a53_reset_func
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a53_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Cortex-A53.
* ----------------------------------------------------
*/
func cortex_a53_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a53_disable_smp
endfunc cortex_a53_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Cortex-A53.
* Clobbers: r0-r3
* -------------------------------------------------------
*/
func cortex_a53_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Disable the optional ACP.
* ---------------------------------------------
*/
bl plat_disable_acp
/* ---------------------------------------------
* Flush L2 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level2
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a53_disable_smp
endfunc cortex_a53_cluster_pwr_dwn
declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
cortex_a53_reset_func, \
cortex_a53_core_pwr_dwn, \
cortex_a53_cluster_pwr_dwn
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a57.h>
#include <cpu_macros.S>
#include <debug.h>
/* ---------------------------------------------
* Disable intra-cluster coherency
* Clobbers: r0-r1
* ---------------------------------------------
*/
func cortex_a57_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
bx lr
endfunc cortex_a57_disable_smp
/* ---------------------------------------------
* Disable all types of L2 prefetches.
* Clobbers: r0-r2
* ---------------------------------------------
*/
func cortex_a57_disable_l2_prefetch
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
CPUECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CPUECTLR
isb
dsb ish
bx lr
endfunc cortex_a57_disable_l2_prefetch
/* ---------------------------------------------
* Disable debug interfaces
* ---------------------------------------------
*/
func cortex_a57_disable_ext_debug
mov r0, #1
stcopr r0, DBGOSDLR
isb
dsb sy
bx lr
endfunc cortex_a57_disable_ext_debug
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
* -------------------------------------------------
*/
func cortex_a57_reset_func
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a57_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Cortex-A57.
* ----------------------------------------------------
*/
func cortex_a57_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a57_disable_l2_prefetch
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a57_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a57_disable_ext_debug
endfunc cortex_a57_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Cortex-A57.
* Clobbers: r0-r3
* -------------------------------------------------------
*/
func cortex_a57_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a57_disable_l2_prefetch
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Disable the optional ACP.
* ---------------------------------------------
*/
bl plat_disable_acp
/* ---------------------------------------------
* Flush L2 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level2
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a57_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a57_disable_ext_debug
endfunc cortex_a57_cluster_pwr_dwn
declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
cortex_a57_reset_func, \
cortex_a57_core_pwr_dwn, \
cortex_a57_cluster_pwr_dwn
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a72.h>
#include <cpu_macros.S>
#include <debug.h>
/* ---------------------------------------------
* Disable all types of L2 prefetches.
* ---------------------------------------------
*/
func cortex_a72_disable_l2_prefetch
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
CPUECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a72_disable_l2_prefetch
/* ---------------------------------------------
* Disable the load-store hardware prefetcher.
* ---------------------------------------------
*/
func cortex_a72_disable_hw_prefetcher
ldcopr16 r0, r1, CPUACTLR
orr64_imm r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
stcopr16 r0, r1, CPUACTLR
isb
dsb ish
bx lr
endfunc cortex_a72_disable_hw_prefetcher
/* ---------------------------------------------
* Disable intra-cluster coherency
* Clobbers: r0-r1
* ---------------------------------------------
*/
func cortex_a72_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
bx lr
endfunc cortex_a72_disable_smp
/* ---------------------------------------------
* Disable debug interfaces
* ---------------------------------------------
*/
func cortex_a72_disable_ext_debug
mov r0, #1
stcopr r0, DBGOSDLR
isb
dsb sy
bx lr
endfunc cortex_a72_disable_ext_debug
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A72.
* -------------------------------------------------
*/
func cortex_a72_reset_func
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
isb
bx lr
endfunc cortex_a72_reset_func
/* ----------------------------------------------------
* The CPU Ops core power down function for Cortex-A72.
* ----------------------------------------------------
*/
func cortex_a72_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a72_disable_l2_prefetch
/* ---------------------------------------------
* Disable the load-store hardware prefetcher.
* ---------------------------------------------
*/
bl cortex_a72_disable_hw_prefetcher
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a72_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a72_disable_ext_debug
endfunc cortex_a72_core_pwr_dwn
/* -------------------------------------------------------
* The CPU Ops cluster power down function for Cortex-A72.
* -------------------------------------------------------
*/
func cortex_a72_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Disable the L2 prefetches.
* ---------------------------------------------
*/
bl cortex_a72_disable_l2_prefetch
/* ---------------------------------------------
* Disable the load-store hardware prefetcher.
* ---------------------------------------------
*/
bl cortex_a72_disable_hw_prefetcher
#if !SKIP_A72_L1_FLUSH_PWR_DWN
/* ---------------------------------------------
* Flush L1 caches.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level1
#endif
/* ---------------------------------------------
* Disable the optional ACP.
* ---------------------------------------------
*/
bl plat_disable_acp
/* -------------------------------------------------
* Flush the L2 caches.
* -------------------------------------------------
*/
mov r0, #DC_OP_CISW
bl dcsw_op_level2
/* ---------------------------------------------
* Come out of intra cluster coherency
* ---------------------------------------------
*/
bl cortex_a72_disable_smp
/* ---------------------------------------------
* Force the debug interfaces to be quiescent
* ---------------------------------------------
*/
pop {r12, lr}
b cortex_a72_disable_ext_debug
endfunc cortex_a72_cluster_pwr_dwn
declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
cortex_a72_reset_func, \
cortex_a72_core_pwr_dwn, \
cortex_a72_cluster_pwr_dwn
......@@ -53,7 +53,7 @@ func reset_handler
/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
......@@ -92,7 +92,7 @@ func prepare_cpu_pwr_dwn
pop {r2, lr}
ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
......@@ -118,7 +118,7 @@ func init_cpu_ops
cmp r1, #0
bne 1f
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
......
......@@ -55,7 +55,7 @@ func reset_handler
/* Get the matching cpu_ops pointer */
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
......@@ -94,7 +94,7 @@ func prepare_cpu_pwr_dwn
mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
......@@ -120,7 +120,7 @@ func init_cpu_ops
cbnz x0, 1f
mov x10, x30
bl get_cpu_ops_ptr
#if ASM_ASSERTION
#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
......
......@@ -165,7 +165,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
*/
psci_plat_pm_ops->pwr_domain_on_finish(state_info);
#if !HW_ASSISTED_COHERENCY
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/*
* Arch. management: Enable data cache and manage stack memory
*/
......
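The final hunk widens the guard around the warm-boot cache setup in psci_cpu_on_finish(): the explicit enable is skipped when either HW_ASSISTED_COHERENCY or the new WARMBOOT_ENABLE_DCACHE_EARLY option already guarantees the data cache is on. A sketch of the resulting shape (illustrative, not part of the diff; the helper name is the usual PSCI cache-maintenance routine and is assumed here):

/* Declared in psci_private.h in-tree; repeated here to keep the sketch
 * self-contained. */
void psci_do_pwrup_cache_maintenance(void);

static void sketch_warmboot_arch_setup(void)
{
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Enable the data cache and clean the stack memory used while the
	 * caches were off; skipped when a build option guarantees the
	 * D-cache is already enabled at this point in the warm-boot path. */
	psci_do_pwrup_cache_maintenance();
#endif
}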