Unverified commit 71f8a6a9, authored by davidcunado-arm, committed by GitHub

Merge pull request #1145 from etienne-lms/rfc-armv7-2

Support ARMv7 architectures
Showing 565 additions and 0 deletions
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a12.h>
#include <cpu_macros.S>
.macro assert_cache_enabled
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
.endm
func cortex_a12_disable_smp
ldcopr r0, ACTLR
bic r0, #CORTEX_A12_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
dsb sy
bx lr
endfunc cortex_a12_disable_smp
func cortex_a12_enable_smp
ldcopr r0, ACTLR
orr r0, #CORTEX_A12_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
bx lr
endfunc cortex_a12_enable_smp
func cortex_a12_reset_func
b cortex_a12_enable_smp
endfunc cortex_a12_reset_func
func cortex_a12_core_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 cache */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a12_disable_smp
endfunc cortex_a12_core_pwr_dwn
func cortex_a12_cluster_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 caches */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
bl plat_disable_acp
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a12_disable_smp
endfunc cortex_a12_cluster_pwr_dwn
declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
cortex_a12_reset_func, \
cortex_a12_core_pwr_dwn, \
cortex_a12_cluster_pwr_dwn
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a15.h>
#include <cpu_macros.S>
/*
 * The Cortex-A15 supports both LPAE and the Virtualization Extensions,
 * whether or not the build configuration uses them. Therefore this file
 * does not check ARCH_IS_ARMV7_WITH_LPAE/VE.
 */
.macro assert_cache_enabled
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
.endm
func cortex_a15_disable_smp
ldcopr r0, ACTLR
bic r0, #CORTEX_A15_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
dsb sy
bx lr
endfunc cortex_a15_disable_smp
func cortex_a15_enable_smp
ldcopr r0, ACTLR
orr r0, #CORTEX_A15_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
bx lr
endfunc cortex_a15_enable_smp
func cortex_a15_reset_func
b cortex_a15_enable_smp
endfunc cortex_a15_reset_func
func cortex_a15_core_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 cache */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a15_disable_smp
endfunc cortex_a15_core_pwr_dwn
func cortex_a15_cluster_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 caches */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
bl plat_disable_acp
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a15_disable_smp
endfunc cortex_a15_cluster_pwr_dwn
declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
cortex_a15_reset_func, \
cortex_a15_core_pwr_dwn, \
cortex_a15_cluster_pwr_dwn
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a17.h>
#include <cpu_macros.S>
.macro assert_cache_enabled
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
.endm
func cortex_a17_disable_smp
ldcopr r0, ACTLR
bic r0, #CORTEX_A17_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
dsb sy
bx lr
endfunc cortex_a17_disable_smp
func cortex_a17_enable_smp
ldcopr r0, ACTLR
orr r0, #CORTEX_A17_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
bx lr
endfunc cortex_a17_enable_smp
func cortex_a17_reset_func
b cortex_a17_enable_smp
endfunc cortex_a17_reset_func
func cortex_a17_core_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 cache */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a17_disable_smp
endfunc cortex_a17_core_pwr_dwn
func cortex_a17_cluster_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 caches */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
bl plat_disable_acp
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a17_disable_smp
endfunc cortex_a17_cluster_pwr_dwn
declare_cpu_ops cortex_a17, CORTEX_A17_MIDR, \
cortex_a17_reset_func, \
cortex_a17_core_pwr_dwn, \
cortex_a17_cluster_pwr_dwn
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a5.h>
#include <cpu_macros.S>
.macro assert_cache_enabled
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
.endm
func cortex_a5_disable_smp
ldcopr r0, ACTLR
bic r0, #CORTEX_A5_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
dsb sy
bx lr
endfunc cortex_a5_disable_smp
func cortex_a5_enable_smp
ldcopr r0, ACTLR
orr r0, #CORTEX_A5_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
bx lr
endfunc cortex_a5_enable_smp
func cortex_a5_reset_func
b cortex_a5_enable_smp
endfunc cortex_a5_reset_func
func cortex_a5_core_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 cache */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a5_disable_smp
endfunc cortex_a5_core_pwr_dwn
func cortex_a5_cluster_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 caches */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
bl plat_disable_acp
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a5_disable_smp
endfunc cortex_a5_cluster_pwr_dwn
declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
cortex_a5_reset_func, \
cortex_a5_core_pwr_dwn, \
cortex_a5_cluster_pwr_dwn
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a7.h>
#include <cpu_macros.S>
.macro assert_cache_enabled
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
.endm
func cortex_a7_disable_smp
ldcopr r0, ACTLR
bic r0, #CORTEX_A7_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
dsb sy
bx lr
endfunc cortex_a7_disable_smp
func cortex_a7_enable_smp
ldcopr r0, ACTLR
orr r0, #CORTEX_A7_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
bx lr
endfunc cortex_a7_enable_smp
func cortex_a7_reset_func
b cortex_a7_enable_smp
endfunc cortex_a7_reset_func
func cortex_a7_core_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 cache */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a7_disable_smp
endfunc cortex_a7_core_pwr_dwn
func cortex_a7_cluster_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 caches */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
bl plat_disable_acp
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a7_disable_smp
endfunc cortex_a7_cluster_pwr_dwn
declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
cortex_a7_reset_func, \
cortex_a7_core_pwr_dwn, \
cortex_a7_cluster_pwr_dwn
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cortex_a9.h>
#include <cpu_macros.S>
.macro assert_cache_enabled
#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
.endm
func cortex_a9_disable_smp
ldcopr r0, ACTLR
bic r0, #CORTEX_A9_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
dsb sy
bx lr
endfunc cortex_a9_disable_smp
func cortex_a9_enable_smp
ldcopr r0, ACTLR
orr r0, #CORTEX_A9_ACTLR_SMP_BIT
stcopr r0, ACTLR
isb
bx lr
endfunc cortex_a9_enable_smp
func cortex_a9_reset_func
b cortex_a9_enable_smp
endfunc cortex_a9_reset_func
func cortex_a9_core_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 cache */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a9_disable_smp
endfunc cortex_a9_core_pwr_dwn
func cortex_a9_cluster_pwr_dwn
push {r12, lr}
assert_cache_enabled
/* Flush L1 caches */
mov r0, #DC_OP_CISW
bl dcsw_op_level1
bl plat_disable_acp
/* Exit cluster coherency */
pop {r12, lr}
b cortex_a9_disable_smp
endfunc cortex_a9_cluster_pwr_dwn
declare_cpu_ops cortex_a9, CORTEX_A9_MIDR, \
cortex_a9_reset_func, \
cortex_a9_core_pwr_dwn, \
cortex_a9_cluster_pwr_dwn
@@ -9,6 +9,17 @@
.globl spin_lock
.globl spin_unlock
#if ARM_ARCH_AT_LEAST(8, 0)
/*
 * According to the ARMv8-A Architecture Reference Manual, "when the global
 * monitor for a PE changes from Exclusive Access state to Open Access state,
 * an event is generated". This applies to both the AArch32 and AArch64 modes
 * of ARMv8-A. As a result, no explicit SEV is required on unlock.
 */
#define COND_SEV()
#else
#define COND_SEV() sev
#endif
func spin_lock
mov r2, #1
@@ -27,5 +38,6 @@ endfunc spin_lock
func spin_unlock
mov r1, #0
stl r1, [r0]
COND_SEV()
bx lr
endfunc spin_unlock
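A hedged usage sketch (not part of this patch; the lock symbol name below is hypothetical): callers pass the address of a zero-initialised 32-bit lock word in r0, and on pre-ARMv8 cores the COND_SEV() in spin_unlock wakes any cores waiting in spin_lock.
	/* Hypothetical lock variable: one zero-initialised 32-bit word. */
	.section .bss
	.align	2
demo_lock:
	.space	4

	.section .text
	/* Acquire: spins until the lock word is claimed. */
	ldr	r0, =demo_lock
	bl	spin_lock
	/* ... critical section: only one CPU runs this at a time ... */
	/* Release: stores 0 to the lock word, then COND_SEV(). */
	ldr	r0, =demo_lock
	bl	spin_unlock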
@@ -266,8 +266,10 @@ int psci_setup(const psci_lib_args_t *lib_args)
******************************************************************************/
void psci_arch_setup(void)
{
#if ARM_ARCH_MAJOR > 7 || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
/* Program the counter frequency */
write_cntfrq_el0(plat_get_syscnt_freq2());
#endif
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
......
@@ -13,6 +13,10 @@
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
#define XLAT_TABLE_LEVEL_BASE \
GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
......
@@ -14,6 +14,10 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
#if ENABLE_ASSERTIONS
unsigned long long xlat_arch_get_max_supported_pa(void)
{
......
@@ -91,6 +91,11 @@ Files:
- docs/plat/xilinx-zynqmp.md
- plat/xilinx/\*
ARMv7 architecture sub-maintainer
---------------------------------
Etienne Carriere (etienne.carriere@linaro.org, `etienne-lms`_)
.. _danh-arm: https://github.com/danh-arm
.. _davidcunado-arm: https://github.com/davidcunado-arm
.. _jenswi-linaro: https://github.com/jenswi-linaro
@@ -100,3 +105,4 @@ Files:
.. _TonyXie06: https://github.com/TonyXie06
.. _rkchrome: https://github.com/rkchrome
.. _sorenb-xlnx: https://github.com/sorenb-xlnx
.. _etienne-lms: https://github.com/etienne-lms
#
# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
ifneq (${ARCH},aarch32)
$(error ARM_ARCH_MAJOR=7 mandates ARCH=aarch32)
endif
# For ARMv7, set march32-directive from the platform directives
# ARM_CORTEX_Ax=yes and ARM_WITH_NEON=yes/no.
#
# GCC and Clang require -march=armv7-a for Cortex-A9 and -march=armv7ve for
# Cortex-A15. armclang requires -march=armv7-a for all ARMv7 Cortex-A cores.
# To satisfy all three toolchains, drop -march and supply only -mcpu.
# A platform can override march32-directive through MARCH32_DIRECTIVE.
ifdef MARCH32_DIRECTIVE
march32-directive := $(MARCH32_DIRECTIVE)
else
march32-set-${ARM_CORTEX_A5} := -mcpu=cortex-a5
march32-set-${ARM_CORTEX_A7} := -mcpu=cortex-a7
march32-set-${ARM_CORTEX_A9} := -mcpu=cortex-a9
march32-set-${ARM_CORTEX_A12} := -mcpu=cortex-a12
march32-set-${ARM_CORTEX_A15} := -mcpu=cortex-a15
march32-set-${ARM_CORTEX_A17} := -mcpu=cortex-a17
march32-neon-$(ARM_WITH_NEON) := -mfpu=neon
# default to -march=armv7-a as target directive
march32-set-yes ?= -march=armv7-a
march32-directive := ${march32-set-yes} ${march32-neon-yes}
endif
# A platform may override these extension support directives:
#
# ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING
# Defined if the core supports the Large Page Addressing extension.
#
# ARMV7_SUPPORTS_VIRTUALIZATION
# Defined if the ARMv7 core supports the Virtualization extension.
#
# ARMV7_SUPPORTS_GENERIC_TIMER
# Defined if the ARMv7 core supports the Generic Timer extension.
ifeq ($(filter yes,$(ARM_CORTEX_A7) $(ARM_CORTEX_A12) $(ARM_CORTEX_A15) $(ARM_CORTEX_A17)),yes)
$(eval $(call add_define,ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING))
$(eval $(call add_define,ARMV7_SUPPORTS_VIRTUALIZATION))
$(eval $(call add_define,ARMV7_SUPPORTS_GENERIC_TIMER))
endif
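For illustration, a hypothetical platform makefile fragment (the file name and values below are assumptions, not part of this patch) showing how the directives above are consumed:
# Hypothetical platform.mk fragment (illustration only).
ARM_ARCH_MAJOR		:=	7
ARM_CORTEX_A7		:=	yes
ARM_WITH_NEON		:=	yes
# With these settings, march32-directive resolves to "-mcpu=cortex-a7 -mfpu=neon"
# and ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING, ARMV7_SUPPORTS_VIRTUALIZATION and
# ARMV7_SUPPORTS_GENERIC_TIMER are defined for the build.
#
# Alternatively, a platform can bypass the selection logic entirely:
# MARCH32_DIRECTIVE	:=	-mcpu=cortex-a7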
@@ -7,16 +7,28 @@
#include <arch.h>
#include <asm_macros.S>
.weak plat_report_exception
.weak plat_crash_console_init
.weak plat_crash_console_putc
.weak plat_crash_console_flush
.weak plat_reset_handler
.weak plat_disable_acp
.weak bl1_plat_prepare_exit
.weak platform_mem_init
.weak plat_error_handler
.weak plat_panic_handler
.weak bl2_plat_preload_setup
.weak plat_try_next_boot_source
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
*/
func plat_report_exception
bx lr
endfunc plat_report_exception
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
@@ -73,6 +85,25 @@ func platform_mem_init
bx lr
endfunc platform_mem_init
/* -----------------------------------------------------
* void bl1_plat_prepare_exit(entry_point_info_t *ep_info);
* Called before exiting BL1. Default: do nothing
* -----------------------------------------------------
*/
func bl1_plat_prepare_exit
bx lr
endfunc bl1_plat_prepare_exit
/* -----------------------------------------------------
* void plat_error_handler(int err) __dead2;
* Endless loop by default.
* -----------------------------------------------------
*/
func plat_error_handler
wfi
b plat_error_handler
endfunc plat_error_handler
/* -----------------------------------------------------
* void plat_panic_handler(void) __dead2;
* Endless loop by default.
......
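Since these handlers are declared .weak, a platform overrides one simply by providing a strong definition of the same name; a minimal hypothetical sketch (not part of this patch):
	/* Hypothetical platform override (illustration only): a strong
	 * definition of the same symbol replaces the weak default at link time.
	 */
	.globl	plat_panic_handler
func plat_panic_handler
	/* A real platform would typically request a system reset here. */
	wfi
	b	plat_panic_handler
endfunc plat_panic_handler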