diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
index 45023a0bbac5eb766731e9a311df88745fdaa7be..7b141da6c06cfe3e4836196c50d99221b1d5dcb0 100644
--- a/include/common/aarch32/asm_macros.S
+++ b/include/common/aarch32/asm_macros.S
@@ -134,4 +134,37 @@
 	.space SPINLOCK_ASM_SIZE
 	.endm
 
+	/*
+	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
+	 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
+	 * or top word of `_val` is zero, the corresponding OR operation
+	 * is skipped.
+	 */
+	.macro orr64_imm _reg_l, _reg_h, _val
+		.if (\_val >> 32)
+			orr \_reg_h, \_reg_h, #(\_val >> 32)
+		.endif
+		.if (\_val & 0xffffffff)
+			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+		.endif
+	.endm
+
+	/*
+	 * Helper macro to bitwise-clear bits in `_reg_l` and
+	 * `_reg_h` given a 64 bit immediate `_val`. The set bits
+	 * in the bottom word of `_val` dictate which bits from
+	 * `_reg_l` should be cleared. Similarly, the set bits in
+	 * the top word of `_val` dictate which bits from `_reg_h`
+	 * should be cleared. If either the bottom or top word of
+	 * `_val` is zero, the corresponding BIC operation is skipped.
+	 */
+	.macro bic64_imm _reg_l, _reg_h, _val
+		.if (\_val >> 32)
+			bic \_reg_h, \_reg_h, #(\_val >> 32)
+		.endif
+		.if (\_val & 0xffffffff)
+			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+		.endif
+	.endm
+
 #endif /* __ASM_MACROS_S__ */
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 234ceeba3168fe490e86f98e595f856a82a1dc00..3c69f9823abbe136ba42dc473de95ddb139c9746 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -394,12 +394,14 @@
 #define HCR		p15, 4, c1, c1, 0
 #define HCPTR		p15, 4, c1, c1, 2
 #define CNTHCTL		p15, 4, c14, c1, 0
+#define CNTKCTL		p15, 0, c14, c1, 0
 #define VPIDR		p15, 4, c0, c0, 0
 #define VMPIDR		p15, 4, c0, c0, 5
 #define ISR		p15, 0, c12, c1, 0
 #define CLIDR		p15, 1, c0, c0, 1
 #define CSSELR		p15, 2, c0, c0, 0
 #define CCSIDR		p15, 1, c0, c0, 0
+#define DBGOSDLR	p14, 0, c1, c3, 4
 
 /* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
 #define HDCR		p15, 4, c1, c1, 1
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index a7d33d86adbceec21df0dd1558c01f6eb753fb6f..472a8859afacf96b16ec2ac7b42d5dac88467817 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -209,6 +209,8 @@ DEFINE_SYSOP_FUNC(wfe)
 DEFINE_SYSOP_FUNC(sev)
 DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
 DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
 DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
 DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index d766490d17312825296e37c2bcacbd7cb9a3042f..4b323d335025ebb49251625b8fc02e77f4b17780 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -261,6 +261,16 @@
 #define DISABLE_ALL_EXCEPTIONS \
 		(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
 
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT		(1 << 1)
+#define RMR_EL3_AA64_BIT	(1 << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE		(0xFFFF0000)
 
 /*
  * TCR defintions
diff --git a/include/lib/cpus/aarch32/cortex_a53.h b/include/lib/cpus/aarch32/cortex_a53.h
new file mode 100644
index 0000000000000000000000000000000000000000..5173d88e10943a595235cfe40ec18bab42e16713
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a53.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef __CORTEX_A53_H__ +#define __CORTEX_A53_H__ + +/* Cortex-A53 midr for revision 0 */ +#define CORTEX_A53_MIDR 0x410FD030 + +/* Retention timer tick definitions */ +#define RETENTION_ENTRY_TICKS_2 0x1 +#define RETENTION_ENTRY_TICKS_8 0x2 +#define RETENTION_ENTRY_TICKS_32 0x3 +#define RETENTION_ENTRY_TICKS_64 0x4 +#define RETENTION_ENTRY_TICKS_128 0x5 +#define RETENTION_ENTRY_TICKS_256 0x6 +#define RETENTION_ENTRY_TICKS_512 0x7 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + ******************************************************************************/ +#define CPUECTLR p15, 1, c15 /* Instruction def. */ + +#define CPUECTLR_SMP_BIT (1 << 6) + +#define CPUECTLR_CPU_RET_CTRL_SHIFT 0 +#define CPUECTLR_CPU_RET_CTRL_MASK (0x7 << CPUECTLR_CPU_RET_CTRL_SHIFT) + +#define CPUECTLR_FPU_RET_CTRL_SHIFT 3 +#define CPUECTLR_FPU_RET_CTRL_MASK (0x7 << CPUECTLR_FPU_RET_CTRL_SHIFT) + +/******************************************************************************* + * CPU Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define CPUMERRSR p15, 2, c15 /* Instruction def. */ + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CPUACTLR p15, 0, c15 /* Instruction def. */ + +#define CPUACTLR_DTAH (1 << 24) + +/******************************************************************************* + * L2 Auxiliary Control register specific definitions. + ******************************************************************************/ +#define L2ACTLR p15, 1, c15, c0, 0 /* Instruction def. */ + +#define L2ACTLR_ENABLE_UNIQUECLEAN (1 << 14) +#define L2ACTLR_DISABLE_CLEAN_PUSH (1 << 3) + +/******************************************************************************* + * L2 Extended Control register specific definitions. + ******************************************************************************/ +#define L2ECTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2ECTLR_RET_CTRL_SHIFT 0 +#define L2ECTLR_RET_CTRL_MASK (0x7 << L2ECTLR_RET_CTRL_SHIFT) + +/******************************************************************************* + * L2 Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define L2MERRSR p15, 3, c15 /* Instruction def. */ + +#endif /* __CORTEX_A53_H__ */ diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h new file mode 100644 index 0000000000000000000000000000000000000000..a09ae9b71d2f10722f669f636d70a81cd3ca8ae4 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a57.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CORTEX_A57_H__ +#define __CORTEX_A57_H__ + +/* Cortex-A57 midr for revision 0 */ +#define CORTEX_A57_MIDR 0x410FD070 + +/* Retention timer tick definitions */ +#define RETENTION_ENTRY_TICKS_2 0x1 +#define RETENTION_ENTRY_TICKS_8 0x2 +#define RETENTION_ENTRY_TICKS_32 0x3 +#define RETENTION_ENTRY_TICKS_64 0x4 +#define RETENTION_ENTRY_TICKS_128 0x5 +#define RETENTION_ENTRY_TICKS_256 0x6 +#define RETENTION_ENTRY_TICKS_512 0x7 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + ******************************************************************************/ +#define CPUECTLR p15, 1, c15 /* Instruction def. */ + +#define CPUECTLR_SMP_BIT (1 << 6) +#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38) +#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35) +#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32) + +#define CPUECTLR_CPU_RET_CTRL_SHIFT 0 +#define CPUECTLR_CPU_RET_CTRL_MASK (0x7 << CPUECTLR_CPU_RET_CTRL_SHIFT) + +/******************************************************************************* + * CPU Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define CPUMERRSR p15, 2, c15 /* Instruction def. */ + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CPUACTLR p15, 0, c15 /* Instruction def. */ + +#define CPUACTLR_DIS_LOAD_PASS_DMB (1 << 59) +#define CPUACTLR_GRE_NGRE_AS_NGNRE (1 << 54) +#define CPUACTLR_DIS_OVERREAD (1 << 52) +#define CPUACTLR_NO_ALLOC_WBWA (1 << 49) +#define CPUACTLR_DCC_AS_DCCI (1 << 44) +#define CPUACTLR_FORCE_FPSCR_FLUSH (1 << 38) +#define CPUACTLR_DIS_STREAMING (3 << 27) +#define CPUACTLR_DIS_L1_STREAMING (3 << 25) +#define CPUACTLR_DIS_INDIRECT_PREDICTOR (1 << 4) + +/******************************************************************************* + * L2 Control register specific definitions. + ******************************************************************************/ +#define L2CTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2CTLR_DATA_RAM_LATENCY_SHIFT 0 +#define L2CTLR_TAG_RAM_LATENCY_SHIFT 6 + +#define L2_DATA_RAM_LATENCY_3_CYCLES 0x2 +#define L2_TAG_RAM_LATENCY_3_CYCLES 0x2 + +/******************************************************************************* + * L2 Extended Control register specific definitions. 
+ ******************************************************************************/ +#define L2ECTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2ECTLR_RET_CTRL_SHIFT 0 +#define L2ECTLR_RET_CTRL_MASK (0x7 << L2ECTLR_RET_CTRL_SHIFT) + +/******************************************************************************* + * L2 Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define L2MERRSR p15, 3, c15 /* Instruction def. */ + +#endif /* __CORTEX_A57_H__ */ diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h new file mode 100644 index 0000000000000000000000000000000000000000..c16a09bc7b0f57563156a194b4cbe05b1996bbfe --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a72.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CORTEX_A72_H__ +#define __CORTEX_A72_H__ + +/* Cortex-A72 midr for revision 0 */ +#define CORTEX_A72_MIDR 0x410FD080 + +/******************************************************************************* + * CPU Extended Control register specific definitions. + ******************************************************************************/ +#define CPUECTLR p15, 1, c15 /* Instruction def. */ + +#define CPUECTLR_SMP_BIT (1 << 6) +#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38) +#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35) +#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32) + +/******************************************************************************* + * CPU Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define CPUMERRSR p15, 2, c15 /* Instruction def. */ + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. 
+ ******************************************************************************/ +#define CPUACTLR p15, 0, c15 /* Instruction def. */ + +#define CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH (1 << 56) +#define CPUACTLR_NO_ALLOC_WBWA (1 << 49) +#define CPUACTLR_DCC_AS_DCCI (1 << 44) + +/******************************************************************************* + * L2 Control register specific definitions. + ******************************************************************************/ +#define L2CTLR p15, 1, c9, c0, 3 /* Instruction def. */ + +#define L2CTLR_DATA_RAM_LATENCY_SHIFT 0 +#define L2CTLR_TAG_RAM_LATENCY_SHIFT 6 + +#define L2_DATA_RAM_LATENCY_3_CYCLES 0x2 +#define L2_TAG_RAM_LATENCY_2_CYCLES 0x1 +#define L2_TAG_RAM_LATENCY_3_CYCLES 0x2 + +/******************************************************************************* + * L2 Memory Error Syndrome register specific definitions. + ******************************************************************************/ +#define L2MERRSR p15, 3, c15 /* Instruction def. */ + +#endif /* __CORTEX_A72_H__ */ diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h index 5a2a9215f4356b6d153ac1032dd3dc01389aa083..4ba4a84549171e1b5e2e40381f0ce172b66ed1b9 100644 --- a/include/plat/arm/common/plat_arm.h +++ b/include/plat/arm/common/plat_arm.h @@ -81,7 +81,7 @@ void arm_setup_page_tables(uintptr_t total_base, #else /* - * Empty macros for all other BL stages other than BL31 + * Empty macros for all other BL stages other than BL31 and BL32 */ #define ARM_INSTANTIATE_LOCK #define arm_lock_init() @@ -157,6 +157,7 @@ void arm_bl2_platform_setup(void); void arm_bl2_plat_arch_setup(void); uint32_t arm_get_spsr_for_bl32_entry(void); uint32_t arm_get_spsr_for_bl33_entry(void); +int arm_bl2_handle_post_image_load(unsigned int image_id); /* BL2U utility functions */ void arm_bl2u_early_platform_setup(struct meminfo *mem_layout, diff --git a/include/plat/arm/soc/common/soc_css_def.h b/include/plat/arm/soc/common/soc_css_def.h index 3b4cc79f8e3661df85f293cc93dc7f128b7cdab8..efd78f04454f0f0dc30bc5eb053ebef76568979e 100644 --- a/include/plat/arm/soc/common/soc_css_def.h +++ b/include/plat/arm/soc/common/soc_css_def.h @@ -96,9 +96,16 @@ /* * Required platform porting definitions common to all ARM CSS SoCs */ - +#if JUNO_AARCH32_EL3_RUNTIME +/* + * Following change is required to initialize TZC + * for enabling access to the HI_VECTOR (0xFFFF0000) + * location needed for JUNO AARCH32 support. + */ +#define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x8000) +#else /* 2MB used for SCP DDR retraining */ #define PLAT_ARM_SCP_TZC_DRAM1_SIZE ULL(0x00200000) - +#endif #endif /* __SOC_CSS_DEF_H__ */ diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S new file mode 100644 index 0000000000000000000000000000000000000000..a16ead8bffcfaff5e3418b91fba7c6c6c4186630 --- /dev/null +++ b/lib/cpus/aarch32/cortex_a53.S @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cortex_a53.h> +#include <cpu_macros.S> +#include <debug.h> + + /* --------------------------------------------- + * Disable intra-cluster coherency + * --------------------------------------------- + */ +func cortex_a53_disable_smp + ldcopr16 r0, r1, CPUECTLR + bic64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + dsb sy + bx lr +endfunc cortex_a53_disable_smp + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A53. + * ------------------------------------------------- + */ +func cortex_a53_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a53_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A53. + * ---------------------------------------------------- + */ +func cortex_a53_core_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a53_disable_smp +endfunc cortex_a53_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A53. + * Clobbers: r0-r3 + * ------------------------------------------------------- + */ +func cortex_a53_cluster_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* --------------------------------------------- + * Flush L2 caches. 
+ * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a53_disable_smp +endfunc cortex_a53_cluster_pwr_dwn + +declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \ + cortex_a53_reset_func, \ + cortex_a53_core_pwr_dwn, \ + cortex_a53_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S new file mode 100644 index 0000000000000000000000000000000000000000..3c5c4549c98876517e0a4618ca79aee4d65e17a7 --- /dev/null +++ b/lib/cpus/aarch32/cortex_a57.S @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cortex_a57.h> +#include <cpu_macros.S> +#include <debug.h> + + /* --------------------------------------------- + * Disable intra-cluster coherency + * Clobbers: r0-r1 + * --------------------------------------------- + */ +func cortex_a57_disable_smp + ldcopr16 r0, r1, CPUECTLR + bic64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + bx lr +endfunc cortex_a57_disable_smp + + /* --------------------------------------------- + * Disable all types of L2 prefetches. 
+ * Clobbers: r0-r2 + * --------------------------------------------- + */ +func cortex_a57_disable_l2_prefetch + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT + bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \ + CPUECTLR_L2_DPFTCH_DIST_MASK) + stcopr16 r0, r1, CPUECTLR + isb + dsb ish + bx lr +endfunc cortex_a57_disable_l2_prefetch + + /* --------------------------------------------- + * Disable debug interfaces + * --------------------------------------------- + */ +func cortex_a57_disable_ext_debug + mov r0, #1 + stcopr r0, DBGOSDLR + isb + dsb sy + bx lr +endfunc cortex_a57_disable_ext_debug + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A57. + * ------------------------------------------------- + */ +func cortex_a57_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a57_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A57. + * ---------------------------------------------------- + */ +func cortex_a57_core_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a57_disable_l2_prefetch + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a57_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a57_disable_ext_debug +endfunc cortex_a57_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A57. + * Clobbers: r0-r3 + * ------------------------------------------------------- + */ +func cortex_a57_cluster_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a57_disable_l2_prefetch + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* --------------------------------------------- + * Flush L2 caches. 
+ * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a57_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a57_disable_ext_debug +endfunc cortex_a57_cluster_pwr_dwn + +declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \ + cortex_a57_reset_func, \ + cortex_a57_core_pwr_dwn, \ + cortex_a57_cluster_pwr_dwn diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S new file mode 100644 index 0000000000000000000000000000000000000000..583c1b58aefda44d9baf0bed992e13ccdee4c20c --- /dev/null +++ b/lib/cpus/aarch32/cortex_a72.S @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cortex_a72.h> +#include <cpu_macros.S> +#include <debug.h> + + /* --------------------------------------------- + * Disable all types of L2 prefetches. + * --------------------------------------------- + */ +func cortex_a72_disable_l2_prefetch + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT + bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \ + CPUECTLR_L2_DPFTCH_DIST_MASK) + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a72_disable_l2_prefetch + + /* --------------------------------------------- + * Disable the load-store hardware prefetcher. 
+ * --------------------------------------------- + */ +func cortex_a72_disable_hw_prefetcher + ldcopr16 r0, r1, CPUACTLR + orr64_imm r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH + stcopr16 r0, r1, CPUACTLR + isb + dsb ish + bx lr +endfunc cortex_a72_disable_hw_prefetcher + + /* --------------------------------------------- + * Disable intra-cluster coherency + * Clobbers: r0-r1 + * --------------------------------------------- + */ +func cortex_a72_disable_smp + ldcopr16 r0, r1, CPUECTLR + bic64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + bx lr +endfunc cortex_a72_disable_smp + + /* --------------------------------------------- + * Disable debug interfaces + * --------------------------------------------- + */ +func cortex_a72_disable_ext_debug + mov r0, #1 + stcopr r0, DBGOSDLR + isb + dsb sy + bx lr +endfunc cortex_a72_disable_ext_debug + + /* ------------------------------------------------- + * The CPU Ops reset function for Cortex-A72. + * ------------------------------------------------- + */ +func cortex_a72_reset_func + /* --------------------------------------------- + * Enable the SMP bit. + * --------------------------------------------- + */ + ldcopr16 r0, r1, CPUECTLR + orr64_imm r0, r1, CPUECTLR_SMP_BIT + stcopr16 r0, r1, CPUECTLR + isb + bx lr +endfunc cortex_a72_reset_func + + /* ---------------------------------------------------- + * The CPU Ops core power down function for Cortex-A72. + * ---------------------------------------------------- + */ +func cortex_a72_core_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a72_disable_l2_prefetch + + /* --------------------------------------------- + * Disable the load-store hardware prefetcher. + * --------------------------------------------- + */ + bl cortex_a72_disable_hw_prefetcher + + /* --------------------------------------------- + * Flush L1 caches. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a72_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a72_disable_ext_debug +endfunc cortex_a72_core_pwr_dwn + + /* ------------------------------------------------------- + * The CPU Ops cluster power down function for Cortex-A72. + * ------------------------------------------------------- + */ +func cortex_a72_cluster_pwr_dwn + push {r12, lr} + + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + + /* --------------------------------------------- + * Disable the L2 prefetches. + * --------------------------------------------- + */ + bl cortex_a72_disable_l2_prefetch + + /* --------------------------------------------- + * Disable the load-store hardware prefetcher. + * --------------------------------------------- + */ + bl cortex_a72_disable_hw_prefetcher + +#if !SKIP_A72_L1_FLUSH_PWR_DWN + /* --------------------------------------------- + * Flush L1 caches. 
+ * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 +#endif + + /* --------------------------------------------- + * Disable the optional ACP. + * --------------------------------------------- + */ + bl plat_disable_acp + + /* ------------------------------------------------- + * Flush the L2 caches. + * ------------------------------------------------- + */ + mov r0, #DC_OP_CISW + bl dcsw_op_level2 + + /* --------------------------------------------- + * Come out of intra cluster coherency + * --------------------------------------------- + */ + bl cortex_a72_disable_smp + + /* --------------------------------------------- + * Force the debug interfaces to be quiescent + * --------------------------------------------- + */ + pop {r12, lr} + b cortex_a72_disable_ext_debug +endfunc cortex_a72_cluster_pwr_dwn + +declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \ + cortex_a72_reset_func, \ + cortex_a72_core_pwr_dwn, \ + cortex_a72_cluster_pwr_dwn diff --git a/plat/arm/board/common/board_css_common.c b/plat/arm/board/common/board_css_common.c index 3fcc6ee02ccf07b6426e7cd24a69a5727d30b39f..6593d2a0c456a7cb6c39ffc0f8738ba10915d5f6 100644 --- a/plat/arm/board/common/board_css_common.c +++ b/plat/arm/board/common/board_css_common.c @@ -79,6 +79,9 @@ const mmap_region_t plat_arm_mmap[] = { #endif #ifdef IMAGE_BL32 const mmap_region_t plat_arm_mmap[] = { +#ifdef AARCH32 + ARM_MAP_SHARED_RAM, +#endif V2M_MAP_IOFPGA, CSS_MAP_DEVICE, SOC_CSS_MAP_DEVICE, diff --git a/plat/arm/board/juno/aarch32/juno_helpers.S b/plat/arm/board/juno/aarch32/juno_helpers.S new file mode 100644 index 0000000000000000000000000000000000000000..86eeb2c43fee5320b1878f4454beab65806b9dd9 --- /dev/null +++ b/plat/arm/board/juno/aarch32/juno_helpers.S @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <arch.h> +#include <asm_macros.S> +#include <bl_common.h> +#include <cortex_a53.h> +#include <cortex_a57.h> +#include <cortex_a72.h> +#include <v2m_def.h> +#include "../juno_def.h" + + + .globl plat_reset_handler + .globl plat_arm_calc_core_pos + +#define JUNO_REVISION(rev) REV_JUNO_R##rev +#define JUNO_HANDLER(rev) plat_reset_handler_juno_r##rev +#define JUMP_TO_HANDLER_IF_JUNO_R(revision) \ + jump_to_handler JUNO_REVISION(revision), JUNO_HANDLER(revision) + + /* -------------------------------------------------------------------- + * Helper macro to jump to the given handler if the board revision + * matches. + * Expects the Juno board revision in x0. + * -------------------------------------------------------------------- + */ + .macro jump_to_handler _revision, _handler + cmp r0, #\_revision + beq \_handler + .endm + + /* -------------------------------------------------------------------- + * Helper macro that reads the part number of the current CPU and jumps + * to the given label if it matches the CPU MIDR provided. + * + * Clobbers r0. + * -------------------------------------------------------------------- + */ + .macro jump_if_cpu_midr _cpu_midr, _label + ldcopr r0, MIDR + ubfx r0, r0, #MIDR_PN_SHIFT, #12 + ldr r1, =((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK) + cmp r0, r1 + beq \_label + .endm + + /* -------------------------------------------------------------------- + * Platform reset handler for Juno R0. + * + * Juno R0 has the following topology: + * - Quad core Cortex-A53 processor cluster; + * - Dual core Cortex-A57 processor cluster. + * + * This handler does the following: + * - Implement workaround for defect id 831273 by enabling an event + * stream every 65536 cycles. + * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57 + * - Set the L2 Tag RAM latency to 2 (i.e. 3 cycles) for Cortex-A57 + * -------------------------------------------------------------------- + */ +func JUNO_HANDLER(0) + /* -------------------------------------------------------------------- + * Enable the event stream every 65536 cycles + * -------------------------------------------------------------------- + */ + mov r0, #(0xf << EVNTI_SHIFT) + orr r0, r0, #EVNTEN_BIT + stcopr r0, CNTKCTL + + /* -------------------------------------------------------------------- + * Nothing else to do on Cortex-A53. + * -------------------------------------------------------------------- + */ + jump_if_cpu_midr CORTEX_A53_MIDR, 1f + + /* -------------------------------------------------------------------- + * Cortex-A57 specific settings + * -------------------------------------------------------------------- + */ + mov r0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \ + (L2_TAG_RAM_LATENCY_3_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT)) + stcopr r0, L2CTLR +1: + isb + bx lr +endfunc JUNO_HANDLER(0) + + /* -------------------------------------------------------------------- + * Platform reset handler for Juno R1. + * + * Juno R1 has the following topology: + * - Quad core Cortex-A53 processor cluster; + * - Dual core Cortex-A57 processor cluster. + * + * This handler does the following: + * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A57 + * + * Note that: + * - The default value for the L2 Tag RAM latency for Cortex-A57 is + * suitable. + * - Defect #831273 doesn't affect Juno R1. 
+ * -------------------------------------------------------------------- + */ +func JUNO_HANDLER(1) + /* -------------------------------------------------------------------- + * Nothing to do on Cortex-A53. + * -------------------------------------------------------------------- + */ + jump_if_cpu_midr CORTEX_A57_MIDR, A57 + bx lr + +A57: + /* -------------------------------------------------------------------- + * Cortex-A57 specific settings + * -------------------------------------------------------------------- + */ + mov r0, #(L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) + stcopr r0, L2CTLR + isb + bx lr +endfunc JUNO_HANDLER(1) + + /* -------------------------------------------------------------------- + * Platform reset handler for Juno R2. + * + * Juno R2 has the following topology: + * - Quad core Cortex-A53 processor cluster; + * - Dual core Cortex-A72 processor cluster. + * + * This handler does the following: + * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72 + * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72 + * + * Note that: + * - Defect #831273 doesn't affect Juno R2. + * -------------------------------------------------------------------- + */ +func JUNO_HANDLER(2) + /* -------------------------------------------------------------------- + * Nothing to do on Cortex-A53. + * -------------------------------------------------------------------- + */ + jump_if_cpu_midr CORTEX_A72_MIDR, A72 + bx lr + +A72: + /* -------------------------------------------------------------------- + * Cortex-A72 specific settings + * -------------------------------------------------------------------- + */ + mov r0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \ + (L2_TAG_RAM_LATENCY_2_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT)) + stcopr r0, L2CTLR + isb + bx lr +endfunc JUNO_HANDLER(2) + + /* -------------------------------------------------------------------- + * void plat_reset_handler(void); + * + * Determine the Juno board revision and call the appropriate reset + * handler. + * -------------------------------------------------------------------- + */ +func plat_reset_handler + /* Read the V2M SYS_ID register */ + ldr r0, =(V2M_SYSREGS_BASE + V2M_SYS_ID) + ldr r1, [r0] + /* Extract board revision from the SYS_ID */ + ubfx r0, r1, #V2M_SYS_ID_REV_SHIFT, #4 + + JUMP_TO_HANDLER_IF_JUNO_R(0) + JUMP_TO_HANDLER_IF_JUNO_R(1) + JUMP_TO_HANDLER_IF_JUNO_R(2) + + /* Board revision is not supported */ + no_ret plat_panic_handler + +endfunc plat_reset_handler + + /* ----------------------------------------------------- + * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) + * Helper function to calculate the core position. 
+ * ----------------------------------------------------- + */ +func plat_arm_calc_core_pos + b css_calc_core_pos_swap_cluster +endfunc plat_arm_calc_core_pos diff --git a/plat/arm/board/juno/aarch64/juno_helpers.S b/plat/arm/board/juno/aarch64/juno_helpers.S index ac54ac9b3779e4ad042bbb09652e78e42582ac2b..49fef16ff0847366697e86fc8f3f8995ff118f2f 100644 --- a/plat/arm/board/juno/aarch64/juno_helpers.S +++ b/plat/arm/board/juno/aarch64/juno_helpers.S @@ -34,12 +34,18 @@ #include <cortex_a53.h> #include <cortex_a57.h> #include <cortex_a72.h> +#include <cpu_macros.S> +#include <css_def.h> #include <v2m_def.h> #include "../juno_def.h" .globl plat_reset_handler .globl plat_arm_calc_core_pos +#if JUNO_AARCH32_EL3_RUNTIME + .globl plat_get_my_entrypoint + .globl juno_reset_to_aarch32_state +#endif #define JUNO_REVISION(rev) REV_JUNO_R##rev #define JUNO_HANDLER(rev) plat_reset_handler_juno_r##rev @@ -205,6 +211,20 @@ func plat_reset_handler endfunc plat_reset_handler + /* ----------------------------------------------------- + * void juno_do_reset_to_aarch32_state(void); + * + * Request warm reset to AArch32 mode. + * ----------------------------------------------------- + */ +func juno_do_reset_to_aarch32_state + mov x0, #RMR_EL3_RR_BIT + dsb sy + msr rmr_el3, x0 + isb + wfi +endfunc juno_do_reset_to_aarch32_state + /* ----------------------------------------------------- * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) * Helper function to calculate the core position. @@ -213,3 +233,77 @@ endfunc plat_reset_handler func plat_arm_calc_core_pos b css_calc_core_pos_swap_cluster endfunc plat_arm_calc_core_pos + +#if JUNO_AARCH32_EL3_RUNTIME + /* --------------------------------------------------------------------- + * uintptr_t plat_get_my_entrypoint (void); + * + * Main job of this routine is to distinguish between a cold and a warm + * boot. On JUNO platform, this distinction is based on the contents of + * the Trusted Mailbox. It is initialised to zero by the SCP before the + * AP cores are released from reset. Therefore, a zero mailbox means + * it's a cold reset. If it is a warm boot then a request to reset to + * AArch32 state is issued. This is the only way to reset to AArch32 + * in EL3 on Juno. A trampoline located at the high vector address + * has already been prepared by BL1. + * + * This functions returns the contents of the mailbox, i.e.: + * - 0 for a cold boot; + * - request warm reset in AArch32 state for warm boot case; + * --------------------------------------------------------------------- + */ +func plat_get_my_entrypoint + mov_imm x0, PLAT_ARM_TRUSTED_MAILBOX_BASE + ldr x0, [x0] + cbz x0, return + b juno_do_reset_to_aarch32_state +1: + b 1b +return: + ret +endfunc plat_get_my_entrypoint + +/* + * Emit a "movw r0, #imm16" which moves the lower + * 16 bits of `_val` into r0. + */ +.macro emit_movw _reg_d, _val + mov_imm \_reg_d, (0xe3000000 | \ + ((\_val & 0xfff) | \ + ((\_val & 0xf000) << 4))) +.endm + +/* + * Emit a "movt r0, #imm16" which moves the upper + * 16 bits of `_val` into r0. + */ +.macro emit_movt _reg_d, _val + mov_imm \_reg_d, (0xe3400000 | \ + (((\_val & 0x0fff0000) >> 16) | \ + ((\_val & 0xf0000000) >> 12))) +.endm + +/* + * This function writes the trampoline code at HI-VEC (0xFFFF0000) + * address which loads r0 with the entrypoint address for + * BL32 (a.k.a SP_MIN) when EL3 is in AArch32 mode. A warm reset + * to AArch32 mode is then requested by writing into RMR_EL3. 
+ */ +func juno_reset_to_aarch32_state + emit_movw w0, BL32_BASE + emit_movt w1, BL32_BASE + /* opcode "bx r0" to branch using r0 in AArch32 mode */ + mov_imm w2, 0xe12fff10 + + /* Write the above opcodes at HI-VECTOR location */ + mov_imm x3, HI_VECTOR_BASE + str w0, [x3], #4 + str w1, [x3], #4 + str w2, [x3] + + bl juno_do_reset_to_aarch32_state +1: + b 1b +endfunc juno_reset_to_aarch32_state + +#endif /* JUNO_AARCH32_EL3_RUNTIME */ diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h index f89f7b4634b45dd56dc5bc56db03deaafca46425..4da8ab06de0863cbda4d7dca10dd845442cb6fe8 100644 --- a/plat/arm/board/juno/include/platform_def.h +++ b/plat/arm/board/juno/include/platform_def.h @@ -103,8 +103,8 @@ #endif #ifdef IMAGE_BL32 -# define PLAT_ARM_MMAP_ENTRIES 4 -# define MAX_XLAT_TABLES 3 +# define PLAT_ARM_MMAP_ENTRIES 5 +# define MAX_XLAT_TABLES 4 #endif /* diff --git a/plat/arm/board/juno/juno_bl1_setup.c b/plat/arm/board/juno/juno_bl1_setup.c index e805c9a38594f41826feead5f62050ca92d581f4..93ca1c319617d5d91f26a6fab8a0c02b632e2047 100644 --- a/plat/arm/board/juno/juno_bl1_setup.c +++ b/plat/arm/board/juno/juno_bl1_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,11 +32,15 @@ #include <errno.h> #include <platform.h> #include <plat_arm.h> +#include <sp805.h> #include <tbbr_img_def.h> #include <v2m_def.h> #define RESET_REASON_WDOG_RESET (0x2) +void juno_reset_to_aarch32_state(void); + + /******************************************************************************* * The following function checks if Firmware update is needed, * by checking if TOC in FIP image is valid or watchdog reset happened. @@ -85,3 +89,15 @@ __dead2 void bl1_plat_fwu_done(void *client_cookie, void *reserved) while (1) wfi(); } + +#if JUNO_AARCH32_EL3_RUNTIME +void bl1_plat_prepare_exit(entry_point_info_t *ep_info) +{ +#if !ARM_DISABLE_TRUSTED_WDOG + /* Disable watchdog before leaving BL1 */ + sp805_stop(ARM_SP805_TWDG_BASE); +#endif + + juno_reset_to_aarch32_state(); +} +#endif /* JUNO_AARCH32_EL3_RUNTIME */ diff --git a/plat/arm/board/juno/juno_bl2_setup.c b/plat/arm/board/juno/juno_bl2_setup.c new file mode 100644 index 0000000000000000000000000000000000000000..ffb6387e057f9234929428d9138bd5611c318a05 --- /dev/null +++ b/plat/arm/board/juno/juno_bl2_setup.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <assert.h> +#include <bl_common.h> +#include <desc_image_load.h> +#include <plat_arm.h> + +#if JUNO_AARCH32_EL3_RUNTIME +/******************************************************************************* + * This function changes the spsr for BL32 image to bypass + * the check in BL1 AArch64 exception handler. This is needed in the aarch32 + * boot flow as the core comes up in aarch64 and to enter the BL32 image a warm + * reset in aarch32 state is required. + ******************************************************************************/ +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + int err = arm_bl2_handle_post_image_load(image_id); + + if (!err && (image_id == BL32_IMAGE_ID)) { + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + assert(bl_mem_params); + bl_mem_params->ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + } + + return err; +} +#endif /* JUNO_AARCH32_EL3_RUNTIME */ diff --git a/plat/arm/board/juno/platform.mk b/plat/arm/board/juno/platform.mk index 39977240bcfc8ff3f4fe2191f494f3339cae6d3b..0855438139b1340070f2f7ccfefcc12e2ff189cf 100644 --- a/plat/arm/board/juno/platform.mk +++ b/plat/arm/board/juno/platform.mk @@ -48,8 +48,14 @@ endif PLAT_INCLUDES := -Iplat/arm/board/juno/include -PLAT_BL_COMMON_SOURCES := plat/arm/board/juno/aarch64/juno_helpers.S +PLAT_BL_COMMON_SOURCES := plat/arm/board/juno/${ARCH}/juno_helpers.S +# Flag to enable support for AArch32 state on JUNO +JUNO_AARCH32_EL3_RUNTIME := 0 +$(eval $(call assert_boolean,JUNO_AARCH32_EL3_RUNTIME)) +$(eval $(call add_define,JUNO_AARCH32_EL3_RUNTIME)) + +ifeq (${ARCH},aarch64) BL1_SOURCES += lib/cpus/aarch64/cortex_a53.S \ lib/cpus/aarch64/cortex_a57.S \ lib/cpus/aarch64/cortex_a72.S \ @@ -59,6 +65,7 @@ BL1_SOURCES += lib/cpus/aarch64/cortex_a53.S \ ${JUNO_SECURITY_SOURCES} BL2_SOURCES += plat/arm/board/juno/juno_err.c \ + plat/arm/board/juno/juno_bl2_setup.c \ ${JUNO_SECURITY_SOURCES} BL2U_SOURCES += ${JUNO_SECURITY_SOURCES} @@ -71,6 +78,7 @@ BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \ ${JUNO_GIC_SOURCES} \ ${JUNO_INTERCONNECT_SOURCES} \ ${JUNO_SECURITY_SOURCES} +endif # Enable workarounds for selected Cortex-A53 and A57 errata. ERRATA_A53_855873 := 1 diff --git a/plat/arm/board/juno/sp_min/sp_min-juno.mk b/plat/arm/board/juno/sp_min/sp_min-juno.mk new file mode 100644 index 0000000000000000000000000000000000000000..fb3c55e7e506b3ebf25fc29619ed883000ea0acb --- /dev/null +++ b/plat/arm/board/juno/sp_min/sp_min-juno.mk @@ -0,0 +1,47 @@ +# +# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# Neither the name of ARM nor the names of its contributors may be used +# to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# + +# SP_MIN source files specific to JUNO platform +BL32_SOURCES += lib/cpus/aarch32/cortex_a53.S \ + lib/cpus/aarch32/cortex_a57.S \ + lib/cpus/aarch32/cortex_a72.S \ + plat/arm/board/juno/juno_pm.c \ + plat/arm/board/juno/juno_topology.c \ + plat/arm/css/common/css_pm.c \ + plat/arm/css/common/css_topology.c \ + plat/arm/soc/common/soc_css_security.c \ + plat/arm/css/drivers/scp/css_pm_scpi.c \ + plat/arm/css/drivers/scpi/css_mhu.c \ + plat/arm/css/drivers/scpi/css_scpi.c \ + ${JUNO_GIC_SOURCES} \ + ${JUNO_INTERCONNECT_SOURCES} \ + ${JUNO_SECURITY_SOURCES} + +include plat/arm/common/sp_min/arm_sp_min.mk diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c index 474dc8b2ef3259cfb45b5ac22dd6321f0c2d66a6..c588f965c1662091ace886a7bc13b778b56cd409 100644 --- a/plat/arm/common/arm_bl1_setup.c +++ b/plat/arm/common/arm_bl1_setup.c @@ -44,6 +44,7 @@ #pragma weak bl1_plat_arch_setup #pragma weak bl1_platform_setup #pragma weak bl1_plat_sec_mem_layout +#pragma weak bl1_plat_prepare_exit /* Data structure which holds the extents of the trusted SRAM for BL1*/ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index 007108d12039dcb4921cdc4514ffa3b4c5df39e6..66e350aad4892b8822fbb1509a2f54bdb639a743 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -249,11 +249,7 @@ void bl2_plat_arch_setup(void) } #if LOAD_IMAGE_V2 -/******************************************************************************* - * This function can be used by the platforms to update/use image - * information for given `image_id`. 
- ******************************************************************************/
-int bl2_plat_handle_post_image_load(unsigned int image_id)
+int arm_bl2_handle_post_image_load(unsigned int image_id)
 {
         int err = 0;
         bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
@@ -286,6 +282,15 @@ int bl2_plat_handle_post_image_load(unsigned int image_id)
         return err;
 }
 
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+        return arm_bl2_handle_post_image_load(image_id);
+}
+
 #else /* LOAD_IMAGE_V2 */
 
 /*******************************************************************************
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 6627513dbf83d1d1150b5d824f6884f1ae7ada56..9cf2b7e052b8c1abe368346ac90a2bb8e22aa50b 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -148,8 +148,14 @@ BL2_SOURCES += drivers/io/io_fip.c \
                plat/arm/common/arm_bl2_setup.c \
                plat/arm/common/arm_io_storage.c
 
 ifeq (${LOAD_IMAGE_V2},1)
-BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c\
-               plat/arm/common/arm_image_load.c \
+# Because BL1/BL2 execute in AArch64 mode but BL32 executes in AArch32, we need
+# to use the AArch32 descriptors.
+ifeq (${JUNO_AARCH32_EL3_RUNTIME},1)
+BL2_SOURCES += plat/arm/common/aarch32/arm_bl2_mem_params_desc.c
+else
+BL2_SOURCES += plat/arm/common/${ARCH}/arm_bl2_mem_params_desc.c
+endif
+BL2_SOURCES += plat/arm/common/arm_image_load.c \
                common/desc_image_load.c
 endif
diff --git a/plat/arm/css/common/aarch32/css_helpers.S b/plat/arm/css/common/aarch32/css_helpers.S
new file mode 100644
index 0000000000000000000000000000000000000000..b7075bd7c3f992e1f084f1a83403cdd526df9baf
--- /dev/null
+++ b/plat/arm/css/common/aarch32/css_helpers.S
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <css_def.h>
+
+        .weak   plat_secondary_cold_boot_setup
+        .weak   plat_get_my_entrypoint
+        .globl  css_calc_core_pos_swap_cluster
+        .weak   plat_is_my_cpu_primary
+
+        /* ---------------------------------------------------------------------
+         * void plat_secondary_cold_boot_setup(void);
+         * In the normal boot flow, cold-booting secondary
+         * CPUs is not yet implemented and they panic.
+         * ---------------------------------------------------------------------
+         */
+func plat_secondary_cold_boot_setup
+        /* TODO: Implement secondary CPU cold boot setup on CSS platforms */
+cb_panic:
+        b       cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+        /* ---------------------------------------------------------------------
+         * uintptr_t plat_get_my_entrypoint (void);
+         *
+         * Main job of this routine is to distinguish between a cold and a warm
+         * boot. On CSS platforms, this distinction is based on the contents of
+         * the Trusted Mailbox. It is initialised to zero by the SCP before the
+         * AP cores are released from reset. Therefore, a zero mailbox means
+         * it's a cold reset.
+         *
+         * This function returns the contents of the mailbox, i.e.:
+         * - 0 for a cold boot;
+         * - the warm boot entrypoint for a warm boot.
+         * ---------------------------------------------------------------------
+         */
+func plat_get_my_entrypoint
+        ldr     r0, =PLAT_ARM_TRUSTED_MAILBOX_BASE
+        ldr     r0, [r0]
+        bx      lr
+endfunc plat_get_my_entrypoint
+
+        /* -----------------------------------------------------------
+         * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr)
+         * Utility function to calculate the core position by
+         * swapping the cluster order. This is necessary in order to
+         * match the format of the boot information passed by the SCP
+         * and read in plat_is_my_cpu_primary below.
+         * -----------------------------------------------------------
+         */
+func css_calc_core_pos_swap_cluster
+        and     r1, r0, #MPIDR_CPU_MASK
+        and     r0, r0, #MPIDR_CLUSTER_MASK
+        eor     r0, r0, #(1 << MPIDR_AFFINITY_BITS)     // swap cluster order
+        add     r0, r1, r0, LSR #6
+        bx      lr
+endfunc css_calc_core_pos_swap_cluster
+
+        /* -----------------------------------------------------
+         * unsigned int plat_is_my_cpu_primary (void);
+         *
+         * Find out whether the current CPU is the primary
+         * CPU (applicable only after a cold boot)
+         * -----------------------------------------------------
+         */
+func plat_is_my_cpu_primary
+        mov     r10, lr
+        bl      plat_my_core_pos
+        ldr     r1, =SCP_BOOT_CFG_ADDR
+        ldr     r1, [r1]
+        ubfx    r1, r1, #PLAT_CSS_PRIMARY_CPU_SHIFT, \
+                        #PLAT_CSS_PRIMARY_CPU_BIT_WIDTH
+        cmp     r0, r1
+        moveq   r0, #1
+        movne   r0, #0
+        bx      r10
+endfunc plat_is_my_cpu_primary
diff --git a/plat/arm/css/common/css_common.mk b/plat/arm/css/common/css_common.mk
index 7829e8b25d003582d7dddcfb37fface7e5769b56..24215a5d1151e6a369e5844ffe29c610d4a9c078 100644
--- a/plat/arm/css/common/css_common.mk
+++ b/plat/arm/css/common/css_common.mk
@@ -36,7 +36,7 @@ PLAT_INCLUDES += -Iinclude/plat/arm/css/common \
                -Iinclude/plat/arm/css/common/aarch64
 
-PLAT_BL_COMMON_SOURCES += plat/arm/css/common/aarch64/css_helpers.S
+PLAT_BL_COMMON_SOURCES += plat/arm/css/common/${ARCH}/css_helpers.S
 
 BL1_SOURCES += plat/arm/css/common/css_bl1_setup.c
diff --git a/plat/arm/css/drivers/scp/css_pm_scpi.c b/plat/arm/css/drivers/scp/css_pm_scpi.c
index e22504d1d4a0842f05785f1e01b2ff0473c81ef0..3b643e6692d5ea617dca950c0ec37b310ec832d9 100644
--- a/plat/arm/css/drivers/scp/css_pm_scpi.c
+++ b/plat/arm/css/drivers/scp/css_pm_scpi.c
@@ -32,6 +32,7 @@
 #include <assert.h>
 #include <css_pm.h>
 #include <debug.h>
+#include <plat_arm.h>
 #include "../scpi/css_scpi.h"
 #include "css_scp.h"
 
@@ -134,6 +135,12 @@ void __dead2 css_scp_sys_shutdown(void)
 {
         uint32_t response;
 
+        /*
+         * Disable the GIC CPU interface to prevent a pending interrupt
+         * from waking up the AP from WFI.
+         */
+        plat_arm_gic_cpuif_disable();
+
         /* Send the power down request to the SCP */
         response = scpi_sys_power_state(scpi_system_shutdown);
 
@@ -153,6 +160,12 @@ void __dead2 css_scp_sys_reboot(void)
 {
         uint32_t response;
 
+        /*
+         * Disable the GIC CPU interface to prevent a pending interrupt
+         * from waking up the AP from WFI.
+         */
+        plat_arm_gic_cpuif_disable();
+
         /* Send the system reset request to the SCP */
         response = scpi_sys_power_state(scpi_system_reboot);
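Note: the index arithmetic in css_calc_core_pos_swap_cluster above is easy to misread, so here is a minimal C sketch of the same computation. It assumes the usual MPIDR field layout (Aff0 = CPU id in bits [7:0], Aff1 = cluster id in bits [15:8]) and four core-position slots per cluster; the function name, the CPUS_PER_CLUSTER constant and the test values below are illustrative only and not part of the patch.

#include <assert.h>
#include <stdint.h>

#define MPIDR_CPU_MASK       0x00ffU   /* Aff0: CPU id within the cluster */
#define MPIDR_CLUSTER_MASK   0xff00U   /* Aff1: cluster id */
#define MPIDR_AFFINITY_BITS  8U
#define CPUS_PER_CLUSTER     4U        /* illustrative slot count per cluster */

/* C equivalent of css_calc_core_pos_swap_cluster: flip the low bit of the
 * cluster id so the numbering matches the SCP's view, then linearise as
 * (cluster * 4) + cpu.  The "add r0, r1, r0, LSR #6" in the assembly is the
 * same as shifting the cluster field down by 8 and multiplying by 4. */
static unsigned int calc_core_pos_swap_cluster(uint32_t mpidr)
{
        uint32_t cpu = mpidr & MPIDR_CPU_MASK;
        uint32_t cluster_field = (mpidr & MPIDR_CLUSTER_MASK) ^
                                 (1U << MPIDR_AFFINITY_BITS);

        return cpu + (cluster_field >> MPIDR_AFFINITY_BITS) * CPUS_PER_CLUSTER;
}

int main(void)
{
        /* Cluster 0, CPU 0 lands at position 4 once the cluster order is swapped... */
        assert(calc_core_pos_swap_cluster(0x0000U) == 4U);
        /* ...while cluster 1, CPU 1 lands at position 1. */
        assert(calc_core_pos_swap_cluster(0x0101U) == 1U);
        return 0;
}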
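In the same spirit, the primary-CPU check in plat_is_my_cpu_primary boils down to a bitfield extract (ubfx) from the SCP boot configuration word at SCP_BOOT_CFG_ADDR, compared against the calling core's position. The sketch below mirrors that comparison in C; the shift and width values, the function names and the test data are illustrative stand-ins for the PLAT_CSS_PRIMARY_CPU_* platform constants, not values taken from the patch.

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins for PLAT_CSS_PRIMARY_CPU_SHIFT and
 * PLAT_CSS_PRIMARY_CPU_BIT_WIDTH; the real values live in the platform
 * headers. */
#define PRIMARY_CPU_SHIFT      8U
#define PRIMARY_CPU_BIT_WIDTH  4U

/* Mirror of the ubfx in plat_is_my_cpu_primary: extract the primary core
 * position recorded by the SCP in its boot configuration word and compare
 * it against the calling core's position. */
static unsigned int is_primary(uint32_t scp_boot_cfg, unsigned int my_core_pos)
{
        uint32_t primary = (scp_boot_cfg >> PRIMARY_CPU_SHIFT) &
                           ((1U << PRIMARY_CPU_BIT_WIDTH) - 1U);

        return my_core_pos == primary;
}

int main(void)
{
        /* If the SCP reports core position 4 as primary, only that core sees
         * a non-zero result (the boot configuration word here is made up). */
        uint32_t boot_cfg = 4U << PRIMARY_CPU_SHIFT;

        assert(is_primary(boot_cfg, 4U));
        assert(!is_primary(boot_cfg, 0U));
        return 0;
}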