Commit 937108a0 authored by danh-arm, committed by GitHub

Merge pull request #678 from soby-mathew/sm/PSCI_AArch32

Introduce AArch32 support for PSCI library
parents 974603b5 9d29c227
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SMCC_MACROS_S__
#define __SMCC_MACROS_S__
#include <arch.h>
/*
* Macro to save the general purpose registers, including the banked
* registers, to the SMC context on entry due to an SMC call. On return,
* r0 contains the pointer to the `smc_context_t`.
*/
.macro smcc_save_gp_mode_regs
push {r0-r3, lr}
ldcopr r0, SCR
and r0, r0, #SCR_NS_BIT
bl smc_get_ctx
/* Save r4 - r12 in the SMC context */
add r1, r0, #SMC_CTX_GPREG_R4
stm r1!, {r4-r12}
/*
* Pop the saved r0 - r3, lr into r4 - r7, lr and then store
* them in the SMC context.
*/
pop {r4-r7, lr}
stm r0, {r4-r7}
/* Save the banked registers including the current SPSR and LR */
mrs r4, sp_usr
mrs r5, lr_usr
mrs r6, spsr_irq
mrs r7, sp_irq
mrs r8, lr_irq
mrs r9, spsr_fiq
mrs r10, sp_fiq
mrs r11, lr_fiq
mrs r12, spsr_svc
stm r1!, {r4-r12}
mrs r4, sp_svc
mrs r5, lr_svc
mrs r6, spsr_abt
mrs r7, sp_abt
mrs r8, lr_abt
mrs r9, spsr_und
mrs r10, sp_und
mrs r11, lr_und
mrs r12, spsr
stm r1!, {r4-r12, lr}
.endm
/*
* Macro to restore the General purpose registers including the banked
* registers from the SMC context prior to exit from the SMC call.
* r0 must point to the `smc_context_t` to restore from.
*/
.macro smcc_restore_gp_mode_regs
/* Restore the banked registers including the current SPSR and LR */
add r1, r0, #SMC_CTX_SP_USR
ldm r1!, {r4-r12}
msr sp_usr, r4
msr lr_usr, r5
msr spsr_irq, r6
msr sp_irq, r7
msr lr_irq, r8
msr spsr_fiq, r9
msr sp_fiq, r10
msr lr_fiq, r11
msr spsr_svc, r12
ldm r1!, {r4-r12, lr}
msr sp_svc, r4
msr lr_svc, r5
msr spsr_abt, r6
msr sp_abt, r7
msr lr_abt, r8
msr spsr_und, r9
msr sp_und, r10
msr lr_und, r11
msr spsr, r12
/* Restore the rest of the general purpose registers */
ldm r0, {r0-r12}
.endm
#endif /* __SMCC_MACROS_S__ */
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -334,8 +334,6 @@
#define CTR_IMINLINE_MASK 0xf
#define MAX_CACHE_LINE_SIZE 0x800 /* 2KB */
#define SIZE_FROM_LOG2_WORDS(n) (4 << (n))
/* Physical timer control register bit-field shifts and masks */
#define CNTP_CTL_ENABLE_SHIFT 0
@@ -82,5 +82,17 @@
((const uint32_t *) &(_uuid))[2], \
((const uint32_t *) &(_uuid))[3])
/*
* Helper macro to retrieve the SMC parameters from cpu_context_t.
*/
#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4) \
do { \
const gp_regs_t *regs = get_gpregs_ctx(_hdl); \
_x1 = read_ctx_reg(regs, CTX_GPREG_X1); \
_x2 = read_ctx_reg(regs, CTX_GPREG_X2); \
_x3 = read_ctx_reg(regs, CTX_GPREG_X3); \
_x4 = read_ctx_reg(regs, CTX_GPREG_X4); \
} while (0)
#endif /*__ASSEMBLY__*/
#endif /* __SMCC_HELPERS_H__ */
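As an aside, here is a minimal sketch of how a runtime service might consume the `get_smc_params_from_ctx()` macro above. The handler below is hypothetical and only illustrates the calling convention; `SMC_RET1` and `u_register_t` are existing TF-A helpers/types.

```c
#include <smcc_helpers.h>	/* get_smc_params_from_ctx(), SMC_RET1 */

/*
 * Hypothetical handler (illustration only, not part of this patch):
 * fetch X1-X4 from the saved cpu_context_t and return a result in X0.
 */
static uintptr_t example_smc_handler(uint32_t smc_fid, void *handle)
{
	u_register_t x1, x2, x3, x4;

	/* Pull X1-X4 out of the saved context in one step */
	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	/* ... service-specific processing of x1..x4 would go here ... */

	SMC_RET1(handle, x1 + x2);	/* SMC_RET1 returns for us */
}
```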
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __AEM_GENERIC_H__
#define __AEM_GENERIC_H__
/* Base AEM MIDR for revision 0 */
#define BASE_AEM_MIDR 0x410FD0F0
#endif /* __AEM_GENERIC_H__ */
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CPU_MACROS_S__
#define __CPU_MACROS_S__
#include <arch.h>
#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
(MIDR_PN_MASK << MIDR_PN_SHIFT)
/*
* Define the offsets to the fields in cpu_ops structure.
*/
.struct 0
CPU_MIDR: /* cpu_ops midr */
.space 4
/* Reset fn is needed during reset */
CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 4
CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
.space 4
CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
.space 4
CPU_OPS_SIZE = .
/*
* Convenience macro to declare a cpu_ops structure. The field
* order must match the offsets defined above.
*/
.macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
.section cpu_ops, "a"
.align 2
.type cpu_ops_\_name, %object
.word \_midr
.if \_noresetfunc
.word 0
.else
.word \_name\()_reset_func
.endif
.word \_name\()_core_pwr_dwn
.word \_name\()_cluster_pwr_dwn
.endm
#endif /* __CPU_MACROS_S__ */
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CONTEXT_H__
#define __CONTEXT_H__
/*******************************************************************************
* Constants that allow assembler code to access members of the 'regs'
* structure at their correct offsets.
******************************************************************************/
#define CTX_REGS_OFFSET 0x0
#define CTX_GPREG_R0 0x0
#define CTX_GPREG_R1 0x4
#define CTX_GPREG_R2 0x8
#define CTX_GPREG_R3 0xC
#define CTX_LR 0x10
#define CTX_SCR 0x14
#define CTX_SPSR 0x18
#define CTX_NS_SCTLR 0x1C
#define CTX_REGS_END 0x20
#ifndef __ASSEMBLY__
#include <cassert.h>
#include <stdint.h>
/*
* Common constants to help define the 'cpu_context' structure and its
* members below.
*/
#define WORD_SHIFT 2
#define DEFINE_REG_STRUCT(name, num_regs) \
typedef struct name { \
uint32_t _regs[num_regs]; \
} __aligned(8) name##_t
/* Constants to determine the size of individual context structures */
#define CTX_REG_ALL (CTX_REGS_END >> WORD_SHIFT)
DEFINE_REG_STRUCT(regs, CTX_REG_ALL);
#undef CTX_REG_ALL
#define read_ctx_reg(ctx, offset) ((ctx)->_regs[(offset) >> WORD_SHIFT])
#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[(offset) >> WORD_SHIFT]) \
= (val))
typedef struct cpu_context {
regs_t regs_ctx;
} cpu_context_t;
/* Macros to access members of the 'cpu_context_t' structure */
#define get_regs_ctx(h) (&((cpu_context_t *) h)->regs_ctx)
/*
* Compile time assertions related to the 'cpu_context' structure to
* ensure that the assembler and the compiler view of the offsets of
* the structure members is the same.
*/
CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \
assert_core_context_regs_offset_mismatch);
#endif /* __ASSEMBLY__ */
#endif /* __CONTEXT_H__ */
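For illustration (not part of the patch), the accessors above compose as follows; the helper name is hypothetical, and the offsets are the CTX_* constants defined earlier in this header.

```c
#include <context.h>

/* Hypothetical helper: program an AArch32 context's entry state. */
static void example_set_entry_state(cpu_context_t *ctx, uint32_t pc,
				    uint32_t spsr)
{
	regs_t *reg_ctx = get_regs_ctx(ctx);

	write_ctx_reg(reg_ctx, CTX_LR, pc);	/* monitor LR = entry point */
	write_ctx_reg(reg_ctx, CTX_SPSR, spsr);	/* SPSR restored on exit */
}
```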
@@ -42,11 +42,6 @@ struct entry_point_info;
* Function & variable prototypes
******************************************************************************/
void cm_init(void);
void *cm_get_context_by_mpidr(uint64_t mpidr,
uint32_t security_state) __deprecated;
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state) __deprecated;
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state);
void cm_set_context_by_index(unsigned int cpu_idx,
@@ -54,12 +49,12 @@ void cm_set_context_by_index(unsigned int cpu_idx,
unsigned int security_state);
void *cm_get_context(uint32_t security_state);
void cm_set_context(void *context, uint32_t security_state);
void cm_init_context(uint64_t mpidr,
const struct entry_point_info *ep) __deprecated;
void cm_init_my_context(const struct entry_point_info *ep);
void cm_init_context_by_index(unsigned int cpu_idx,
const struct entry_point_info *ep);
void cm_prepare_el3_exit(uint32_t security_state);
#ifndef AARCH32
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
@@ -71,6 +66,16 @@ void cm_write_scr_el3_bit(uint32_t security_state,
void cm_set_next_eret_context(uint32_t security_state);
uint32_t cm_get_scr_el3(uint32_t security_state);
void cm_init_context(uint64_t mpidr,
const struct entry_point_info *ep) __deprecated;
void *cm_get_context_by_mpidr(uint64_t mpidr,
uint32_t security_state) __deprecated;
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state) __deprecated;
/* Inline definitions */
/*******************************************************************************
@@ -98,4 +103,5 @@ static inline void cm_set_next_context(void *context)
"msr spsel, #0\n"
: : "r" (context));
}
#endif /* AARCH32 */
#endif /* __CM_H__ */
@@ -31,16 +31,28 @@
#ifndef __CPU_DATA_H__
#define __CPU_DATA_H__
#ifdef AARCH32
#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR 0x0
#else /* AARCH32 */
/* Offsets for the cpu_data structure */
#define CPU_DATA_CRASH_BUF_OFFSET 0x18
/* need enough space in crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE 64
#define CPU_DATA_CPU_OPS_PTR 0x10
#endif /* AARCH32 */
#if CRASH_REPORTING
#define CPU_DATA_LOG2SIZE 7
#else
#define CPU_DATA_LOG2SIZE 6
#endif
/* need enough space in crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE 64
#define CPU_DATA_CPU_OPS_PTR 0x10
#ifndef __ASSEMBLY__
@@ -77,7 +89,9 @@
* used for this.
******************************************************************************/
typedef struct cpu_data {
#ifndef AARCH32
void *cpu_context[2];
#endif
uintptr_t cpu_ops_ptr;
#if CRASH_REPORTING
u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
@@ -104,12 +118,15 @@ CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
#ifndef AARCH32
/* Return the cpu_data structure for the current CPU. */
static inline struct cpu_data *_cpu_data(void)
{
return (cpu_data_t *)read_tpidr_el3();
}
#else
struct cpu_data *_cpu_data(void);
#endif
/**************************************************************************
* APIs for initialising and accessing per-cpu data
@@ -359,6 +359,8 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
int psci_setup(uintptr_t mailbox_ep);
void psci_warmboot_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
void psci_prepare_next_non_secure_ctx(
struct entry_point_info *next_image_info);
#endif /*__ASSEMBLY__*/
@@ -31,6 +31,10 @@
* From: @(#)types.h 8.3 (Berkeley) 1/5/94
* $FreeBSD$
*/
/*
* Portions copyright (c) 2016, ARM Limited and Contributors.
* All rights reserved.
*/
#ifndef _MACHINE__TYPES_H_
#define _MACHINE__TYPES_H_
@@ -48,19 +52,56 @@ typedef short __int16_t;
typedef unsigned short __uint16_t;
typedef int __int32_t;
typedef unsigned int __uint32_t;
/*
* Standard type definitions which are different in AArch64 and AArch32
*/
#ifdef AARCH32
typedef long long __int64_t;
typedef unsigned long long __uint64_t;
typedef __int32_t __critical_t;
typedef __int32_t __intfptr_t;
typedef __int32_t __intptr_t;
typedef __int32_t __ptrdiff_t; /* ptr1 - ptr2 */
typedef __int32_t __register_t;
typedef __int32_t __segsz_t; /* segment size (in pages) */
typedef __uint32_t __size_t; /* sizeof() */
typedef __int32_t __ssize_t; /* byte count or error */
typedef __uint32_t __uintfptr_t;
typedef __uint32_t __uintptr_t;
typedef __uint32_t __u_register_t;
typedef __uint32_t __vm_offset_t;
typedef __uint32_t __vm_paddr_t;
typedef __uint32_t __vm_size_t;
#elif defined AARCH64
typedef long __int64_t;
typedef unsigned long __uint64_t;
typedef __int64_t __critical_t;
typedef __int64_t __intfptr_t;
typedef __int64_t __intptr_t;
typedef __int64_t __ptrdiff_t; /* ptr1 - ptr2 */
typedef __int64_t __register_t;
typedef __int64_t __segsz_t; /* segment size (in pages) */
typedef __uint64_t __size_t; /* sizeof() */
typedef __int64_t __ssize_t; /* byte count or error */
typedef __uint64_t __uintfptr_t;
typedef __uint64_t __uintptr_t;
typedef __uint64_t __u_register_t;
typedef __uint64_t __vm_offset_t;
typedef __uint64_t __vm_paddr_t;
typedef __uint64_t __vm_size_t;
#else
#error "Only AArch32 or AArch64 supported"
#endif /* AARCH32 */
/*
* Standard type definitions.
*/
typedef __int32_t __clock_t; /* clock()... */
typedef __int64_t __critical_t;
typedef double __double_t;
typedef float __float_t;
typedef __int64_t __intfptr_t;
typedef __int64_t __intmax_t;
typedef __int64_t __intptr_t;
typedef __int32_t __int_fast8_t;
typedef __int32_t __int_fast16_t;
typedef __int32_t __int_fast32_t;
@@ -69,15 +110,8 @@ typedef __int8_t __int_least8_t;
typedef __int16_t __int_least16_t;
typedef __int32_t __int_least32_t;
typedef __int64_t __int_least64_t;
typedef __int64_t __ptrdiff_t; /* ptr1 - ptr2 */
typedef __int64_t __register_t;
typedef __int64_t __segsz_t; /* segment size (in pages) */
typedef __uint64_t __size_t; /* sizeof() */
typedef __int64_t __ssize_t; /* byte count or error */
typedef __int64_t __time_t; /* time()... */
typedef __uint64_t __uintfptr_t;
typedef __uint64_t __uintmax_t;
typedef __uint64_t __uintptr_t;
typedef __uint32_t __uint_fast8_t;
typedef __uint32_t __uint_fast16_t;
typedef __uint32_t __uint_fast32_t;
@@ -86,12 +120,8 @@ typedef __uint8_t __uint_least8_t;
typedef __uint16_t __uint_least16_t;
typedef __uint32_t __uint_least32_t;
typedef __uint64_t __uint_least64_t;
typedef __uint64_t __u_register_t;
typedef __uint64_t __vm_offset_t;
typedef __int64_t __vm_ooffset_t;
typedef __uint64_t __vm_paddr_t;
typedef __uint64_t __vm_pindex_t;
typedef __uint64_t __vm_size_t;
/*
* Unusual type definitions.
@@ -38,6 +38,8 @@
#define IS_POWER_OF_TWO(x) \
(((x) & ((x) - 1)) == 0)
#define SIZE_FROM_LOG2_WORDS(n) (4 << (n))
/*
* The round_up() macro rounds up a value to the given boundary in a
* type-agnostic yet type-safe manner. The boundary must be a power of two.
@@ -188,9 +188,14 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
void mmap_add(const mmap_region_t *mm);
#ifdef AARCH32
/* AArch32 specific translation table API */
void enable_mmu_secure(uint32_t flags);
#else
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el3(unsigned int flags);
#endif /* AARCH32 */
#endif /*__ASSEMBLY__*/
#endif /* __XLAT_TABLES_H__ */
@@ -321,9 +321,12 @@
# error "Unsupported ARM_TSP_RAM_LOCATION_ID value"
#endif
/* BL32 is mandatory in AArch32 */
#ifndef AARCH32
#ifdef SPD_none
#undef BL32_BASE
#endif /* SPD_none */
#endif
/*******************************************************************************
* FWU Images: NS_BL1U, BL2U & NS_BL2U defines.
@@ -167,6 +167,9 @@ void arm_bl31_plat_arch_setup(void);
/* TSP utility functions */
void arm_tsp_early_platform_setup(void);
/* SP_MIN utility functions */
void arm_sp_min_early_platform_setup(void);
/* FIP TOC validity check */
int arm_io_is_toc_valid(void);
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
.globl flush_dcache_range
.globl clean_dcache_range
.globl inv_dcache_range
.globl dcsw_op_louis
.globl dcsw_op_all
.globl dcsw_op_level1
.globl dcsw_op_level2
.globl dcsw_op_level3
/*
* This macro implements a data cache maintenance operation `op` by MVA
* over the address range 'r0' (base) to 'r0 + r1' (base + size).
*/
.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
loop_\op:
stcopr r0, \coproc, \opc1, \CRn, \CRm, \opc2
add r0, r0, r2
cmp r0, r1
blo loop_\op
dsb sy
bx lr
.endm
/* ------------------------------------------
* Clean and invalidate from the base address
* for the given size. 'r0' = addr, 'r1' = size
* ------------------------------------------
*/
func flush_dcache_range
do_dcache_maintenance_by_mva cimvac, DCCIMVAC
endfunc flush_dcache_range
/* ------------------------------------------
* Clean from the base address for the given
* size. 'r0' = addr, 'r1' = size
* ------------------------------------------
*/
func clean_dcache_range
do_dcache_maintenance_by_mva cmvac, DCCMVAC
endfunc clean_dcache_range
/* ------------------------------------------
* Invalidate from the base address for the
* given size. 'r0' = addr, 'r1' = size
* ------------------------------------------
*/
func inv_dcache_range
do_dcache_maintenance_by_mva imvac, DCIMVAC
endfunc inv_dcache_range
/* ----------------------------------------------------------------
* Data cache operations by set/way to the level specified
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* r1: The cache level to begin operation from
* r2: clidr_el1
* r3: The last cache level to operate on
* and will carry out the operation on each data cache from the level
* in r1 to the level in r3 in sequence
*
* The dcsw_op macro sets up the r2 and r3 parameters based on
* clidr_el1 cache information before invoking the main function
* ----------------------------------------------------------------
*/
.macro dcsw_op shift, fw, ls
ldcopr r2, CLIDR
ubfx r3, r2, \shift, \fw
lsl r3, r3, \ls
mov r1, #0
b do_dcsw_op
.endm
func do_dcsw_op
push {r4-r12,lr}
adr r11, dcsw_loop_table // compute cache op based on the operation type
add r6, r11, r0, lsl #3 // cache op is 2x32-bit instructions
loop1:
add r10, r1, r1, LSR #1 // Work out 3x current cache level
mov r12, r2, LSR r10 // extract cache type bits from clidr
and r12, r12, #7 // mask the bits for current cache only
cmp r12, #2 // see what cache we have at this level
blt level_done // no cache or only instruction cache at this level
stcopr r1, CSSELR // select current cache level in csselr
isb // isb to sync the new csselr & ccsidr
ldcopr r12, CCSIDR // read the new ccsidr
and r10, r12, #7 // extract the length of the cache lines
add r10, r10, #4 // add 4 (r10 = line length offset)
ubfx r4, r12, #3, #10 // r4 = maximum way number (right aligned)
clz r5, r4 // r5 = the bit position of the way size increment
mov r9, r4 // r9 working copy of the aligned max way number
loop2:
ubfx r7, r12, #13, #15 // r7 = max set number (right aligned)
loop3:
orr r0, r1, r9, LSL r5 // factor in the way number and cache level into r0
orr r0, r0, r7, LSL r10 // factor in the set number
blx r6
subs r7, r7, #1 // decrement the set number
bge loop3
subs r9, r9, #1 // decrement the way number
bge loop2
level_done:
add r1, r1, #2 // increment the cache number
cmp r3, r1
dsb sy // ensure completion of previous cache maintenance instruction
bgt loop1
mov r6, #0
stcopr r6, CSSELR // select cache level 0 in csselr
dsb sy
isb
pop {r4-r12,pc}
dcsw_loop_table:
stcopr r0, DCISW
bx lr
stcopr r0, DCCISW
bx lr
stcopr r0, DCCSW
bx lr
endfunc do_dcsw_op
/* ---------------------------------------------------------------
* Data cache operations by set/way up to the PoU.
*
* The function requires :
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_louis
dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
endfunc dcsw_op_louis
/* ---------------------------------------------------------------
* Data cache operations by set/way up to the PoC.
*
* The function requires :
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_all
dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
endfunc dcsw_op_all
/* ---------------------------------------------------------------
* Helper macro for data cache operations by set/way for the
* level specified
* ---------------------------------------------------------------
*/
.macro dcsw_op_level level
ldcopr r2, CLIDR
mov r3, \level
sub r1, r3, #2
b do_dcsw_op
.endm
/* ---------------------------------------------------------------
* Data cache operations by set/way for level 1 cache
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_level1
dcsw_op_level #(1 << LEVEL_SHIFT)
endfunc dcsw_op_level1
/* ---------------------------------------------------------------
* Data cache operations by set/way for level 2 cache
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_level2
dcsw_op_level #(2 << LEVEL_SHIFT)
endfunc dcsw_op_level2
/* ---------------------------------------------------------------
* Data cache operations by set/way for level 3 cache
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_level3
dcsw_op_level #(3 << LEVEL_SHIFT)
endfunc dcsw_op_level3
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
.globl zeromem
/* -----------------------------------------------------------------------
* void zeromem(void *mem, unsigned int length);
*
* Initialise a memory region to 0.
* The memory address and length must be 4-byte aligned.
* -----------------------------------------------------------------------
*/
func zeromem
#if ASM_ASSERTION
tst r0, #0x3
ASM_ASSERT(eq)
tst r1, #0x3
ASM_ASSERT(eq)
#endif
add r2, r0, r1
mov r1, #0
z_loop:
cmp r2, r0
beq z_end
str r1, [r0], #4
b z_loop
z_end:
bx lr
endfunc zeromem
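For readers less familiar with ARM assembly, here is a C rendering of the same loop; a sketch only, under the 4-byte alignment assumption stated in the comment above.

```c
#include <stdint.h>

/* Reference version of zeromem (illustration only) */
void zeromem_ref(void *mem, unsigned int length)
{
	/* Both mem and length are assumed 4-byte aligned, as above */
	uint32_t *cur = mem;
	uint32_t *end = (uint32_t *)((uintptr_t)mem + length);

	while (cur != end)
		*cur++ = 0;	/* str r1, [r0], #4 in the assembly */
}
```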
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <aem_generic.h>
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
func aem_generic_core_pwr_dwn
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Flush L1 cache to PoU.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
b dcsw_op_louis
endfunc aem_generic_core_pwr_dwn
func aem_generic_cluster_pwr_dwn
/* Assert if cache is enabled */
#if ASM_ASSERTION
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
#endif
/* ---------------------------------------------
* Flush L1 and L2 caches to PoC.
* ---------------------------------------------
*/
mov r0, #DC_OP_CISW
b dcsw_op_all
endfunc aem_generic_cluster_pwr_dwn
/* cpu_ops for Base AEM FVP */
declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_data.h>
#include <cpu_macros.S>
/*
* The reset handler common to all platforms. After a matching
* cpu_ops structure entry is found, the corresponding reset_handler
* in the cpu_ops is invoked. The reset handler is invoked very early
* in the boot sequence and it is assumed that we can clobber r0 - r10
* without the need to follow AAPCS.
* Clobbers: r0 - r10
*/
.globl reset_handler
func reset_handler
mov r10, lr
/* The plat_reset_handler can clobber r0 - r9 */
bl plat_reset_handler
/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
bl get_cpu_ops_ptr
#if ASM_ASSERTION
cmp r0, #0
ASM_ASSERT(ne)
#endif
/* Get the cpu_ops reset handler */
ldr r1, [r0, #CPU_RESET_FUNC]
cmp r1, #0
mov lr, r10
bxne r1
bx lr
endfunc reset_handler
/*
* The prepare core power down function for all platforms. After
* the cpu_ops pointer is retrieved from cpu_data, the corresponding
* pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
*/
.globl prepare_core_pwr_dwn
func prepare_core_pwr_dwn
push {lr}
bl _cpu_data
pop {lr}
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
cmp r1, #0
ASM_ASSERT(ne)
#endif
/* Get the cpu_ops core_pwr_dwn handler */
ldr r0, [r1, #CPU_PWR_DWN_CORE]
bx r0
endfunc prepare_core_pwr_dwn
/*
* The prepare cluster power down function for all platforms. After
* the cpu_ops pointer is retrieved from cpu_data, the corresponding
* pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
*/
.globl prepare_cluster_pwr_dwn
func prepare_cluster_pwr_dwn
push {lr}
bl _cpu_data
pop {lr}
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
cmp r1, #0
ASM_ASSERT(ne)
#endif
/* Get the cpu_ops cluster_pwr_dwn handler */
ldr r0, [r1, #CPU_PWR_DWN_CLUSTER]
bx r0
endfunc prepare_cluster_pwr_dwn
/*
* Initializes the cpu_ops_ptr if not already initialized
* in cpu_data. This must only be called after the data cache
* is enabled. AAPCS is followed.
*/
.globl init_cpu_ops
func init_cpu_ops
push {r4 - r6, lr}
bl _cpu_data
mov r6, r0
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
cmp r1, #0
bne 1f
bl get_cpu_ops_ptr
#if ASM_ASSERTION
cmp r0, #0
ASM_ASSERT(ne)
#endif
str r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
pop {r4 - r6, pc}
endfunc init_cpu_ops
/*
* The function below returns the cpu_ops structure matching the
* MIDR of the core. It reads the MIDR and finds the matching
* entry in the list of cpu_ops entries. Only the implementer and
* part number fields are used to match the entries.
* Return :
* r0 - The matching cpu_ops pointer on success
* r0 - 0 on failure
* Clobbers: r0 - r5
*/
.globl get_cpu_ops_ptr
func get_cpu_ops_ptr
/* Get the cpu_ops start and end locations */
ldr r4, =(__CPU_OPS_START__ + CPU_MIDR)
ldr r5, =(__CPU_OPS_END__ + CPU_MIDR)
/* Initialize the return parameter */
mov r0, #0
/* Read the MIDR */
ldcopr r2, MIDR
ldr r3, =CPU_IMPL_PN_MASK
/* Retain only the implementation and part number using mask */
and r2, r2, r3
1:
/* Check if we have reached end of list */
cmp r4, r5
bge error_exit
/* load the midr from the cpu_ops */
ldr r1, [r4], #CPU_OPS_SIZE
and r1, r1, r3
/* Check if midr matches to midr of this core */
cmp r1, r2
bne 1b
/* Subtract the increment and offset to get the cpu_ops pointer */
sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
bx lr
endfunc get_cpu_ops_ptr
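A C rendering of this lookup may help. It is a sketch only: the struct mirrors the `.struct` offsets in cpu_macros.S, the linker symbols match the assembly, `read_midr()` is the usual TF-A accessor, and `CPU_IMPL_PN_MASK` stands in for the assembly-side mask.

```c
#include <stddef.h>
#include <stdint.h>
#include <arch_helpers.h>	/* read_midr() */

/* Mirror of the cpu_ops layout laid out with .struct in cpu_macros.S */
typedef struct cpu_ops_ref {
	uint32_t midr;
	void (*reset_func)(void);
	void (*core_pwr_dwn)(void);
	void (*cluster_pwr_dwn)(void);
} cpu_ops_ref_t;

extern cpu_ops_ref_t __CPU_OPS_START__[], __CPU_OPS_END__[];

static cpu_ops_ref_t *get_cpu_ops_ptr_ref(void)
{
	uint32_t midr = read_midr() & CPU_IMPL_PN_MASK;

	for (cpu_ops_ref_t *ops = __CPU_OPS_START__;
	     ops < __CPU_OPS_END__; ops++) {
		/* Match on implementer and part number only */
		if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
			return ops;
	}
	return NULL;	/* r0 = 0 in the assembly version */
}
```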
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>
/*******************************************************************************
* Context management library initialisation routine. This library is used by
* runtime services to share pointers to 'cpu_context' structures for the secure
* and non-secure states. Management of the structures and their associated
* memory is not done by the context management library, e.g. the PSCI service
* manages the cpu context used for entry from and exit to the non-secure state.
* The Secure payload manages the context(s) corresponding to the secure state.
* It also uses this library to get access to the non-secure
* state cpu context pointers.
******************************************************************************/
void cm_init(void)
{
/*
* The context management library has only global data to initialize, but
* that will be done when the BSS is zeroed out
*/
}
/*******************************************************************************
* The following function initializes the cpu_context 'ctx' for
* first use, and sets the initial entrypoint state as specified by the
* entry_point_info structure.
*
* The security state to initialize is determined by the SECURE attribute
* of the entry_point_info. The function returns a pointer to the initialized
* context and sets this as the next context to return to.
*
* The EE and ST attributes are used to configure the endianness and secure
* timer availability for the new execution context.
*
* To prepare the register state for entry, call cm_prepare_el3_exit() and
* el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
* cm_el1_sysregs_context_restore().
******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
unsigned int security_state;
uint32_t scr, sctlr;
regs_t *reg_ctx;
assert(ctx);
security_state = GET_SECURITY_STATE(ep->h.attr);
/* Clear any residual register values from the context */
memset(ctx, 0, sizeof(*ctx));
/*
* Base the context SCR on the current value, adjust for entry point
* specific requirements
*/
scr = read_scr();
scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);
if (security_state != SECURE)
scr |= SCR_NS_BIT;
/*
* Set up SCTLR for the Non Secure context.
* EE bit is taken from the entrypoint attributes
* M, C and I bits must be zero (as required by PSCI specification)
*
* The target exception level is based on the spsr mode requested.
* If execution is requested to hyp mode, HVC is enabled
* via SCR.HCE.
*
* Always compute the SCTLR value and save it in the cpu_context
* - the HYP registers are set up by cm_prepare_el3_exit() as they
* are not part of the stored cpu_context
*
* TODO: In debug builds the spsr should be validated and checked
* against the CPU support, security state, endianness and pc
*/
reg_ctx = get_regs_ctx(ctx);
if (security_state != SECURE) {
sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
sctlr |= SCTLR_RES1;
write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
}
if (GET_M32(ep->spsr) == MODE32_hyp)
scr |= SCR_HCE_BIT;
write_ctx_reg(reg_ctx, CTX_SCR, scr);
write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);
/*
* Store the r0-r3 value from the entrypoint into the context
* Use memcpy as we are in control of the layout of the structures
*/
memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}
/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
const entry_point_info_t *ep)
{
cpu_context_t *ctx;
ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
}
/*******************************************************************************
* The following function initializes the cpu_context for the current CPU
* for first use, and sets the initial entrypoint state as specified by the
* entry_point_info structure.
******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
cpu_context_t *ctx;
ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
}
/*******************************************************************************
* Prepare the CPU system registers for first entry into secure or normal world
*
* If execution is requested in HYP mode, HSCTLR is initialized.
* If execution is requested in non-secure PL1 and the CPU supports
* HYP mode, then HYP mode is disabled by configuring all necessary
* HYP mode registers.
******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t sctlr, scr, hcptr;
cpu_context_t *ctx = cm_get_context(security_state);
assert(ctx);
if (security_state == NON_SECURE) {
scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
if (scr & SCR_HCE_BIT) {
/* Use SCTLR value to initialize HSCTLR */
sctlr = read_ctx_reg(get_regs_ctx(ctx),
CTX_NS_SCTLR);
sctlr |= HSCTLR_RES1;
/* Temporarily set the NS bit to access HSCTLR */
write_scr(read_scr() | SCR_NS_BIT);
/*
* Make sure the write to SCR is complete so that
* we can access HSCTLR
*/
isb();
write_hsctlr(sctlr);
isb();
write_scr(read_scr() & ~SCR_NS_BIT);
isb();
} else if (read_id_pfr1() &
(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
/* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */
write_scr(read_scr() | SCR_NS_BIT);
isb();
/* PL2 present but unused, need to disable safely */
write_hcr(0);
/* HSCTLR : can be ignored when bypassing */
/* HCPTR : disable all traps TCPAC, TTA, TCP */
hcptr = read_hcptr();
hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
write_hcptr(hcptr);
/* Enable EL1 access to timer */
write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);
/* Reset CNTVOFF_EL2 */
write64_cntvoff(0);
/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
write_vpidr(read_midr());
write_vmpidr(read_mpidr());
/*
* Reset VTTBR.
* Needed because cache maintenance operations depend on
* the VMID even when non-secure EL1&0 stage 2 address
* translation is disabled.
*/
write64_vttbr(0);
isb();
write_scr(read_scr() & ~SCR_NS_BIT);
isb();
}
}
}
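To show how these pieces fit together, here is a hedged sketch of a caller preparing first entry into the normal world; the wrapper name is hypothetical and entry-point setup is elided.

```c
#include <context_mgmt.h>

/* Hypothetical wrapper showing the intended call sequence */
static void example_enter_normal_world(const entry_point_info_t *ep)
{
	/* Build this CPU's non-secure cpu_context from the entry point */
	cm_init_my_context(ep);

	/* Program HYP/PL2 state (HSCTLR, HCR, ...) before leaving monitor mode */
	cm_prepare_el3_exit(NON_SECURE);
}
```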
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm_macros.S>
#include <cpu_data.h>
.globl _cpu_data
.globl _cpu_data_by_index
/* -----------------------------------------------------------------
* cpu_data_t *_cpu_data(void)
*
* Return the cpu_data structure for the current CPU.
* -----------------------------------------------------------------
*/
func _cpu_data
push {lr}
bl plat_my_core_pos
pop {lr}
b _cpu_data_by_index
endfunc _cpu_data
/* -----------------------------------------------------------------
* cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
*
* Return the cpu_data structure for the CPU with given linear index
*
* This can be called without a valid stack.
* clobbers: r0, r1
* -----------------------------------------------------------------
*/
func _cpu_data_by_index
ldr r1, =percpu_data
add r0, r1, r0, LSL #CPU_DATA_LOG2SIZE
bx lr
endfunc _cpu_data_by_index
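In C terms, the helper above is plain array indexing; a sketch assuming `percpu_data` is the per-CPU array referenced by the `ldr r1, =percpu_data` above.

```c
#include <cpu_data.h>

/* Reference version of _cpu_data_by_index (illustration only) */
cpu_data_t *cpu_data_by_index_ref(uint32_t cpu_index)
{
	/*
	 * CPU_DATA_LOG2SIZE keeps sizeof(cpu_data_t) a power of two,
	 * so the assembly can index with a single shift-and-add.
	 */
	return &percpu_data[cpu_index];
}
```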