Commit 937108a0 authored by danh-arm, committed by GitHub

Merge pull request #678 from soby-mathew/sm/PSCI_AArch32

Introduce AArch32 support for PSCI library
parents 974603b5 9d29c227
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SMCC_MACROS_S__
#define __SMCC_MACROS_S__
#include <arch.h>
/*
 * Macro to save the general-purpose registers, including the banked
 * registers, to the SMC context on entry due to an SMC call. On return,
 * r0 contains the pointer to the `smc_context_t`.
*/
.macro smcc_save_gp_mode_regs
push {r0-r3, lr}
ldcopr r0, SCR
and r0, r0, #SCR_NS_BIT
bl smc_get_ctx
/* Save r4 - r12 in the SMC context */
add r1, r0, #SMC_CTX_GPREG_R4
stm r1!, {r4-r12}
	/*
	 * Pop the r0 - r3 and lr values pushed on entry into r4 - r7 and lr,
	 * then save them to the SMC context.
	 */
pop {r4-r7, lr}
stm r0, {r4-r7}
/* Save the banked registers including the current SPSR and LR */
mrs r4, sp_usr
mrs r5, lr_usr
mrs r6, spsr_irq
mrs r7, sp_irq
mrs r8, lr_irq
mrs r9, spsr_fiq
mrs r10, sp_fiq
mrs r11, lr_fiq
mrs r12, spsr_svc
stm r1!, {r4-r12}
mrs r4, sp_svc
mrs r5, lr_svc
mrs r6, spsr_abt
mrs r7, sp_abt
mrs r8, lr_abt
mrs r9, spsr_und
mrs r10, sp_und
mrs r11, lr_und
mrs r12, spsr
stm r1!, {r4-r12, lr}
.endm
/*
* Macro to restore the General purpose registers including the banked
* registers from the SMC context prior to exit from the SMC call.
* r0 must point to the `smc_context_t` to restore from.
*/
.macro smcc_restore_gp_mode_regs
/* Restore the banked registers including the current SPSR and LR */
add r1, r0, #SMC_CTX_SP_USR
ldm r1!, {r4-r12}
msr sp_usr, r4
msr lr_usr, r5
msr spsr_irq, r6
msr sp_irq, r7
msr lr_irq, r8
msr spsr_fiq, r9
msr sp_fiq, r10
msr lr_fiq, r11
msr spsr_svc, r12
ldm r1!, {r4-r12, lr}
msr sp_svc, r4
msr lr_svc, r5
msr spsr_abt, r6
msr sp_abt, r7
msr lr_abt, r8
msr spsr_und, r9
msr sp_und, r10
msr lr_und, r11
msr spsr, r12
/* Restore the rest of the general purpose registers */
ldm r0, {r0-r12}
.endm
#endif /* __SMCC_MACROS_S__ */
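A minimal sketch of how the save/restore macros above are intended to pair up around a monitor-mode SMC handler. Every `sketch_*` name is hypothetical (the real SP_MIN entry code is not part of this hunk), and a valid monitor-mode stack is assumed for the push/pop inside `smcc_save_gp_mode_regs`:

	func sketch_smc_entry
	/* Save caller state; on return r0 points to the smc_context_t */
	smcc_save_gp_mode_regs
	/* Per AAPCS, r0 (the context pointer) becomes the first argument of
	 * the hypothetical C handler, which returns the context to resume
	 * in r0 */
	bl	sketch_smc_handler
	smcc_restore_gp_mode_regs	/* r0 must point to the smc_context_t */
	eret				/* exception return via lr_mon/spsr_mon */
	endfunc sketch_smc_entry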
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -334,8 +334,6 @@
 #define CTR_IMINLINE_MASK	0xf
 #define MAX_CACHE_LINE_SIZE	0x800 /* 2KB */
-#define SIZE_FROM_LOG2_WORDS(n)	(4 << (n))
 /* Physical timer control register bit fields shifts and masks */
 #define CNTP_CTL_ENABLE_SHIFT	0
...
@@ -82,5 +82,17 @@
 		((const uint32_t *) &(_uuid))[2], \
 		((const uint32_t *) &(_uuid))[3])
+
+/*
+ * Helper macro to retrieve the SMC parameters from cpu_context_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4)	\
+	do {							\
+		const gp_regs_t *regs = get_gpregs_ctx(_hdl);	\
+		_x1 = read_ctx_reg(regs, CTX_GPREG_X1);		\
+		_x2 = read_ctx_reg(regs, CTX_GPREG_X2);		\
+		_x3 = read_ctx_reg(regs, CTX_GPREG_X3);		\
+		_x4 = read_ctx_reg(regs, CTX_GPREG_X4);		\
+	} while (0)
 #endif /*__ASSEMBLY__*/
 #endif /* __SMCC_HELPERS_H__ */
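The new helper is meant to be consumed by C runtime-service handlers that receive the saved cpu_context_t as an opaque handle. A short sketch, using a hypothetical handler name and TF-A's u_register_t type (only the macro and the existing CTX_GPREG_* accessors come from the tree):

static uintptr_t sketch_svc_handler(uint32_t smc_fid, void *handle)
{
	u_register_t x1, x2, x3, x4;

	/* Unpack X1-X4 from the saved GP-register context in one statement */
	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	/* ... dispatch on smc_fid using x1-x4 ... */
	return 0;
}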
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __AEM_GENERIC_H__
#define __AEM_GENERIC_H__
/* BASE AEM midr for revision 0 */
#define BASE_AEM_MIDR 0x410FD0F0
#endif /* __AEM_GENERIC_H__ */
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CPU_MACROS_S__
#define __CPU_MACROS_S__
#include <arch.h>
#define CPU_IMPL_PN_MASK	((MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
				 (MIDR_PN_MASK << MIDR_PN_SHIFT))
/*
* Define the offsets to the fields in cpu_ops structure.
*/
.struct 0
CPU_MIDR: /* cpu_ops midr */
.space 4
/* Reset fn is needed during reset */
CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 4
CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
.space 4
CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
.space 4
CPU_OPS_SIZE = .
/*
* Convenience macro to declare cpu_ops structure.
* Make sure the structure fields are as per the offsets
* defined above.
*/
.macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
.section cpu_ops, "a"
.align 2
.type cpu_ops_\_name, %object
.word \_midr
.if \_noresetfunc
.word 0
.else
.word \_name\()_reset_func
.endif
.word \_name\()_core_pwr_dwn
.word \_name\()_cluster_pwr_dwn
.endm
#endif /* __CPU_MACROS_S__ */
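To use the macro, a CPU support file defines its hooks following the `<name>_` convention the macro expands against, then invokes it once. A sketch with hypothetical names (`sketch_cpu`, `SKETCH_CPU_MIDR`; the stubs do no real power-down work):

	func sketch_cpu_reset_func
	bx	lr			/* reset-time work would go here */
	endfunc sketch_cpu_reset_func

	func sketch_cpu_core_pwr_dwn
	bx	lr			/* core power-down sequence (stub) */
	endfunc sketch_cpu_core_pwr_dwn

	func sketch_cpu_cluster_pwr_dwn
	bx	lr			/* cluster power-down sequence (stub) */
	endfunc sketch_cpu_cluster_pwr_dwn

	/* Emits cpu_ops_sketch_cpu into the 'cpu_ops' section */
	declare_cpu_ops sketch_cpu, SKETCH_CPU_MIDR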
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CONTEXT_H__
#define __CONTEXT_H__
/*******************************************************************************
 * Constants that allow assembler code to access members of the 'regs'
 * structure at their correct offsets.
******************************************************************************/
#define CTX_REGS_OFFSET 0x0
#define CTX_GPREG_R0 0x0
#define CTX_GPREG_R1 0x4
#define CTX_GPREG_R2 0x8
#define CTX_GPREG_R3 0xC
#define CTX_LR 0x10
#define CTX_SCR 0x14
#define CTX_SPSR 0x18
#define CTX_NS_SCTLR 0x1C
#define CTX_REGS_END 0x20
#ifndef __ASSEMBLY__
#include <cassert.h>
#include <stdint.h>
/*
* Common constants to help define the 'cpu_context' structure and its
* members below.
*/
#define WORD_SHIFT 2
#define DEFINE_REG_STRUCT(name, num_regs) \
typedef struct name { \
uint32_t _regs[num_regs]; \
} __aligned(8) name##_t
/* Constants to determine the size of individual context structures */
#define CTX_REG_ALL (CTX_REGS_END >> WORD_SHIFT)
DEFINE_REG_STRUCT(regs, CTX_REG_ALL);
#undef CTX_REG_ALL
#define read_ctx_reg(ctx, offset)	((ctx)->_regs[(offset) >> WORD_SHIFT])
#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[(offset) >> WORD_SHIFT]) \
					 = (val))
typedef struct cpu_context {
regs_t regs_ctx;
} cpu_context_t;
/* Macros to access members of the 'cpu_context_t' structure */
#define get_regs_ctx(h) (&((cpu_context_t *) h)->regs_ctx)
/*
* Compile time assertions related to the 'cpu_context' structure to
* ensure that the assembler and the compiler view of the offsets of
* the structure members is the same.
*/
CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \
assert_core_context_regs_offset_mismatch);
#endif /* __ASSEMBLY__ */
#endif /* __CONTEXT_H__ */
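A minimal sketch of driving the accessors above from C, assuming this header is included as <context.h>; the two wrapper functions are illustrative only, not part of the patch:

#include <context.h>

/* Place an SMC return value in the saved r0 slot of a context */
static void sketch_set_smc_ret(cpu_context_t *ctx, uint32_t ret)
{
	write_ctx_reg(get_regs_ctx(ctx), CTX_GPREG_R0, ret);
}

/* Read back the link register saved in the same context */
static uint32_t sketch_get_saved_lr(cpu_context_t *ctx)
{
	return read_ctx_reg(get_regs_ctx(ctx), CTX_LR);
}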
@@ -42,11 +42,6 @@ struct entry_point_info;
 * Function & variable prototypes
 ******************************************************************************/
 void cm_init(void);
-void *cm_get_context_by_mpidr(uint64_t mpidr,
-		uint32_t security_state) __deprecated;
-void cm_set_context_by_mpidr(uint64_t mpidr,
-		void *context,
-		uint32_t security_state) __deprecated;
 void *cm_get_context_by_index(unsigned int cpu_idx,
 		unsigned int security_state);
 void cm_set_context_by_index(unsigned int cpu_idx,
@@ -54,12 +49,12 @@ void cm_set_context_by_index(unsigned int cpu_idx,
 		unsigned int security_state);
 void *cm_get_context(uint32_t security_state);
 void cm_set_context(void *context, uint32_t security_state);
-void cm_init_context(uint64_t mpidr,
-		const struct entry_point_info *ep) __deprecated;
 void cm_init_my_context(const struct entry_point_info *ep);
 void cm_init_context_by_index(unsigned int cpu_idx,
 		const struct entry_point_info *ep);
 void cm_prepare_el3_exit(uint32_t security_state);
+#ifndef AARCH32
 void cm_el1_sysregs_context_save(uint32_t security_state);
 void cm_el1_sysregs_context_restore(uint32_t security_state);
 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
@@ -71,6 +66,16 @@ void cm_write_scr_el3_bit(uint32_t security_state,
 void cm_set_next_eret_context(uint32_t security_state);
 uint32_t cm_get_scr_el3(uint32_t security_state);
+
+void cm_init_context(uint64_t mpidr,
+		const struct entry_point_info *ep) __deprecated;
+void *cm_get_context_by_mpidr(uint64_t mpidr,
+		uint32_t security_state) __deprecated;
+void cm_set_context_by_mpidr(uint64_t mpidr,
+		void *context,
+		uint32_t security_state) __deprecated;
+
 /* Inline definitions */
 /*******************************************************************************
@@ -98,4 +103,5 @@ static inline void cm_set_next_context(void *context)
 	"msr spsel, #0\n"
 	: : "r" (context));
 }
+#endif /* AARCH32 */
 #endif /* __CM_H__ */
@@ -31,16 +31,28 @@
 #ifndef __CPU_DATA_H__
 #define __CPU_DATA_H__
+#ifdef AARCH32
+
+#if CRASH_REPORTING
+#error "Crash reporting is not supported in AArch32"
+#endif
+
+#define CPU_DATA_CPU_OPS_PTR		0x0
+
+#else /* AARCH32 */
+
 /* Offsets for the cpu_data structure */
 #define CPU_DATA_CRASH_BUF_OFFSET	0x18
+/* need enough space in crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE		64
+#define CPU_DATA_CPU_OPS_PTR		0x10
+
+#endif /* AARCH32 */
+
 #if CRASH_REPORTING
 #define CPU_DATA_LOG2SIZE		7
 #else
 #define CPU_DATA_LOG2SIZE		6
 #endif
-/* need enough space in crash buffer to save 8 registers */
-#define CPU_DATA_CRASH_BUF_SIZE	64
-#define CPU_DATA_CPU_OPS_PTR	0x10
 #ifndef __ASSEMBLY__
@@ -77,7 +89,9 @@
 * used for this.
 ******************************************************************************/
 typedef struct cpu_data {
+#ifndef AARCH32
 	void *cpu_context[2];
+#endif
 	uintptr_t cpu_ops_ptr;
 #if CRASH_REPORTING
 	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
@@ -104,12 +118,15 @@ CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
 struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+#ifndef AARCH32
 /* Return the cpu_data structure for the current CPU. */
 static inline struct cpu_data *_cpu_data(void)
 {
 	return (cpu_data_t *)read_tpidr_el3();
 }
+#else
+struct cpu_data *_cpu_data(void);
+#endif
 /**************************************************************************
 * APIs for initialising and accessing per-cpu data
...
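AArch32 has no TPIDR_EL3 in which to cache the per-CPU pointer, so _cpu_data() becomes an out-of-line function. A plausible definition (an assumption here; the actual body lives in one of the collapsed diffs) simply indexes by the core's linear position:

#include <cpu_data.h>
#include <platform.h>

struct cpu_data *_cpu_data(void)
{
	/* plat_my_core_pos() returns this core's linear index */
	return _cpu_data_by_index(plat_my_core_pos());
}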
@@ -359,6 +359,8 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
 int psci_setup(uintptr_t mailbox_ep);
 void psci_warmboot_entrypoint(void);
 void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
+void psci_prepare_next_non_secure_ctx(
+		struct entry_point_info *next_image_info);
 #endif /*__ASSEMBLY__*/
...
@@ -31,6 +31,10 @@
 * From: @(#)types.h	8.3 (Berkeley) 1/5/94
 * $FreeBSD$
 */
+/*
+ * Portions copyright (c) 2016, ARM Limited and Contributors.
+ * All rights reserved.
+ */
 #ifndef _MACHINE__TYPES_H_
 #define _MACHINE__TYPES_H_
@@ -48,19 +52,56 @@ typedef short __int16_t;
 typedef unsigned short __uint16_t;
 typedef int __int32_t;
 typedef unsigned int __uint32_t;
+
+/*
+ * Standard type definitions which are different in AArch64 and AArch32
+ */
+#ifdef AARCH32
+typedef long long __int64_t;
+typedef unsigned long long __uint64_t;
+typedef __int32_t __critical_t;
+typedef __int32_t __intfptr_t;
+typedef __int32_t __intptr_t;
+typedef __int32_t __ptrdiff_t;		/* ptr1 - ptr2 */
+typedef __int32_t __register_t;
+typedef __int32_t __segsz_t;		/* segment size (in pages) */
+typedef __uint32_t __size_t;		/* sizeof() */
+typedef __int32_t __ssize_t;		/* byte count or error */
+typedef __uint32_t __uintfptr_t;
+typedef __uint32_t __uintptr_t;
+typedef __uint32_t __u_register_t;
+typedef __uint32_t __vm_offset_t;
+typedef __uint32_t __vm_paddr_t;
+typedef __uint32_t __vm_size_t;
+#elif defined AARCH64
 typedef long __int64_t;
 typedef unsigned long __uint64_t;
+typedef __int64_t __critical_t;
+typedef __int64_t __intfptr_t;
+typedef __int64_t __intptr_t;
+typedef __int64_t __ptrdiff_t;		/* ptr1 - ptr2 */
+typedef __int64_t __register_t;
+typedef __int64_t __segsz_t;		/* segment size (in pages) */
+typedef __uint64_t __size_t;		/* sizeof() */
+typedef __int64_t __ssize_t;		/* byte count or error */
+typedef __uint64_t __uintfptr_t;
+typedef __uint64_t __uintptr_t;
+typedef __uint64_t __u_register_t;
+typedef __uint64_t __vm_offset_t;
+typedef __uint64_t __vm_paddr_t;
+typedef __uint64_t __vm_size_t;
+#else
+#error "Only AArch32 or AArch64 supported"
+#endif /* AARCH32 */
+
 /*
 * Standard type definitions.
 */
 typedef __int32_t __clock_t;		/* clock()... */
-typedef __int64_t __critical_t;
 typedef double __double_t;
 typedef float __float_t;
-typedef __int64_t __intfptr_t;
 typedef __int64_t __intmax_t;
-typedef __int64_t __intptr_t;
 typedef __int32_t __int_fast8_t;
 typedef __int32_t __int_fast16_t;
 typedef __int32_t __int_fast32_t;
@@ -69,15 +110,8 @@ typedef __int8_t __int_least8_t;
 typedef __int16_t __int_least16_t;
 typedef __int32_t __int_least32_t;
 typedef __int64_t __int_least64_t;
-typedef __int64_t __ptrdiff_t;		/* ptr1 - ptr2 */
-typedef __int64_t __register_t;
-typedef __int64_t __segsz_t;		/* segment size (in pages) */
-typedef __uint64_t __size_t;		/* sizeof() */
-typedef __int64_t __ssize_t;		/* byte count or error */
 typedef __int64_t __time_t;		/* time()... */
-typedef __uint64_t __uintfptr_t;
 typedef __uint64_t __uintmax_t;
-typedef __uint64_t __uintptr_t;
 typedef __uint32_t __uint_fast8_t;
 typedef __uint32_t __uint_fast16_t;
 typedef __uint32_t __uint_fast32_t;
@@ -86,12 +120,8 @@ typedef __uint8_t __uint_least8_t;
 typedef __uint16_t __uint_least16_t;
 typedef __uint32_t __uint_least32_t;
 typedef __uint64_t __uint_least64_t;
-typedef __uint64_t __u_register_t;
-typedef __uint64_t __vm_offset_t;
 typedef __int64_t __vm_ooffset_t;
-typedef __uint64_t __vm_paddr_t;
 typedef __uint64_t __vm_pindex_t;
-typedef __uint64_t __vm_size_t;
 /*
 * Unusual type definitions.
...
@@ -38,6 +38,8 @@
 #define IS_POWER_OF_TWO(x) \
 	(((x) & ((x) - 1)) == 0)
+
+#define SIZE_FROM_LOG2_WORDS(n)	(4 << (n))
 /*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
...
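For example, SIZE_FROM_LOG2_WORDS(2) evaluates to 4 << 2 = 16, i.e. 2^2 32-bit words expressed as a size in bytes.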
@@ -188,9 +188,14 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 			size_t size, unsigned int attr);
 void mmap_add(const mmap_region_t *mm);
+#ifdef AARCH32
+/* AArch32 specific translation table API */
+void enable_mmu_secure(uint32_t flags);
+#else
 /* AArch64 specific translation table APIs */
 void enable_mmu_el1(unsigned int flags);
 void enable_mmu_el3(unsigned int flags);
+#endif /* AARCH32 */
 #endif /*__ASSEMBLY__*/
 #endif /* __XLAT_TABLES_H__ */
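With this guard, common platform code can pick the right MMU-enable call per architecture. A sketch of the pattern (the wrapper name is hypothetical; init_xlat_tables() is the existing API from this header, and regions are assumed to have been registered via mmap_add() beforehand):

#include <xlat_tables.h>

void sketch_enable_mmu(void)
{
	init_xlat_tables();
#ifdef AARCH32
	enable_mmu_secure(0);	/* secure PL1&0 translation regime */
#else
	enable_mmu_el3(0);	/* EL3 translation regime */
#endif
}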
@@ -321,9 +321,12 @@
 # error "Unsupported ARM_TSP_RAM_LOCATION_ID value"
 #endif
+/* BL32 is mandatory in AArch32 */
+#ifndef AARCH32
 #ifdef SPD_none
 #undef BL32_BASE
 #endif /* SPD_none */
+#endif
 /*******************************************************************************
 * FWU Images: NS_BL1U, BL2U & NS_BL2U defines.
...
@@ -167,6 +167,9 @@ void arm_bl31_plat_arch_setup(void);
 /* TSP utility functions */
 void arm_tsp_early_platform_setup(void);
+/* SP_MIN utility functions */
+void arm_sp_min_early_platform_setup(void);
 /* FIP TOC validity check */
 int arm_io_is_toc_valid(void);
...
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
.globl flush_dcache_range
.globl clean_dcache_range
.globl inv_dcache_range
.globl dcsw_op_louis
.globl dcsw_op_all
.globl dcsw_op_level1
.globl dcsw_op_level2
.globl dcsw_op_level3
/*
 * Macro that implements a data cache maintenance operation `op` by MVA
 * over the address range [r0, r0 + r1), one cache line at a time.
 */
.macro do_dcache_maintenance_by_mva op, coproc, opc1, CRn, CRm, opc2
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
loop_\op:
stcopr r0, \coproc, \opc1, \CRn, \CRm, \opc2
add r0, r0, r2
cmp r0, r1
blo loop_\op
dsb sy
bx lr
.endm
/* ------------------------------------------
 * Clean and invalidate the data cache for
 * the region [addr, addr + size).
 * 'r0' = addr, 'r1' = size
 * ------------------------------------------
 */
func flush_dcache_range
do_dcache_maintenance_by_mva cimvac, DCCIMVAC
endfunc flush_dcache_range
/* ------------------------------------------
 * Clean the data cache for the region
 * [addr, addr + size).
 * 'r0' = addr, 'r1' = size
 * ------------------------------------------
 */
func clean_dcache_range
do_dcache_maintenance_by_mva cmvac, DCCMVAC
endfunc clean_dcache_range
/* ------------------------------------------
 * Invalidate the data cache for the region
 * [addr, addr + size).
 * 'r0' = addr, 'r1' = size
 * ------------------------------------------
 */
func inv_dcache_range
do_dcache_maintenance_by_mva imvac, DCIMVAC
endfunc inv_dcache_range
/* ----------------------------------------------------------------
* Data cache operations by set/way to the level specified
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* r1: The cache level to begin operation from
* r2: clidr_el1
* r3: The last cache level to operate on
 * and will carry out the operation on each data cache level from
 * the one in r1 up to the one in r3, in sequence
*
* The dcsw_op macro sets up the r2 and r3 parameters based on
* clidr_el1 cache information before invoking the main function
* ----------------------------------------------------------------
*/
.macro dcsw_op shift, fw, ls
ldcopr r2, CLIDR
ubfx r3, r2, \shift, \fw
lsl r3, r3, \ls
mov r1, #0
b do_dcsw_op
.endm
func do_dcsw_op
push {r4-r12,lr}
adr r11, dcsw_loop_table // compute cache op based on the operation type
add r6, r11, r0, lsl #3 // cache op is 2x32-bit instructions
loop1:
add r10, r1, r1, LSR #1 // Work out 3x current cache level
mov r12, r2, LSR r10 // extract cache type bits from clidr
and r12, r12, #7 // mask the bits for current cache only
cmp r12, #2 // see what cache we have at this level
blt level_done // no cache or only instruction cache at this level
stcopr r1, CSSELR // select current cache level in csselr
	isb				// isb to sync the new csselr & ccsidr
ldcopr r12, CCSIDR // read the new ccsidr
and r10, r12, #7 // extract the length of the cache lines
add r10, r10, #4 // add 4 (r10 = line length offset)
ubfx r4, r12, #3, #10 // r4 = maximum way number (right aligned)
clz r5, r4 // r5 = the bit position of the way size increment
mov r9, r4 // r9 working copy of the aligned max way number
loop2:
ubfx r7, r12, #13, #15 // r7 = max set number (right aligned)
loop3:
orr r0, r1, r9, LSL r5 // factor in the way number and cache level into r0
orr r0, r0, r7, LSL r10 // factor in the set number
blx r6
subs r7, r7, #1 // decrement the set number
bge loop3
subs r9, r9, #1 // decrement the way number
bge loop2
level_done:
add r1, r1, #2 // increment the cache number
cmp r3, r1
dsb sy // ensure completion of previous cache maintenance instruction
bgt loop1
mov r6, #0
	stcopr	r6, CSSELR		// select cache level 0 in csselr
dsb sy
isb
pop {r4-r12,pc}
dcsw_loop_table:
stcopr r0, DCISW
bx lr
stcopr r0, DCCISW
bx lr
stcopr r0, DCCSW
bx lr
endfunc do_dcsw_op
/* ---------------------------------------------------------------
 * Data cache operations by set/way, up to the Point of Unification.
 *
 * The function requires:
 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
 * as defined in arch.h
 * ---------------------------------------------------------------
 */
func dcsw_op_louis
dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
endfunc dcsw_op_louis
/* ---------------------------------------------------------------
 * Data cache operations by set/way, up to the Point of Coherency.
 *
 * The function requires:
 * r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
 * as defined in arch.h
 * ---------------------------------------------------------------
 */
func dcsw_op_all
dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
endfunc dcsw_op_all
/* ---------------------------------------------------------------
* Helper macro for data cache operations by set/way for the
* level specified
* ---------------------------------------------------------------
*/
.macro dcsw_op_level level
ldcopr r2, CLIDR
mov r3, \level
sub r1, r3, #2
b do_dcsw_op
.endm
/* ---------------------------------------------------------------
* Data cache operations by set/way for level 1 cache
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_level1
dcsw_op_level #(1 << LEVEL_SHIFT)
endfunc dcsw_op_level1
/* ---------------------------------------------------------------
* Data cache operations by set/way for level 2 cache
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_level2
dcsw_op_level #(2 << LEVEL_SHIFT)
endfunc dcsw_op_level2
/* ---------------------------------------------------------------
* Data cache operations by set/way for level 3 cache
*
* The main function, do_dcsw_op requires:
* r0: The operation type (DC_OP_ISW, DC_OP_CISW, DC_OP_CSW),
* as defined in arch.h
* ---------------------------------------------------------------
*/
func dcsw_op_level3
dcsw_op_level #(3 << LEVEL_SHIFT)
endfunc dcsw_op_level3
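From C, these helpers are reached through prototypes kept in TF-A's arch helper headers; the fragment below restates them locally as an assumption (matching the register usage documented above) and shows a typical call:

#include <stddef.h>
#include <stdint.h>

/* Assumed prototypes for the assembly routines above */
void flush_dcache_range(uintptr_t addr, size_t size);
void dcsw_op_all(unsigned int op);	/* op: DC_OP_ISW/DC_OP_CISW/DC_OP_CSW */

static void sketch_publish_buffer(void *buf, size_t len)
{
	/* Clean+invalidate the buffer region before handing it off */
	flush_dcache_range((uintptr_t)buf, len);
}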
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
.globl zeromem
/* -----------------------------------------------------------------------
* void zeromem(void *mem, unsigned int length);
*
* Initialise a memory region to 0.
* The memory address and length must be 4-byte aligned.
* -----------------------------------------------------------------------
*/
func zeromem
#if ASM_ASSERTION
tst r0, #0x3
ASM_ASSERT(eq)
tst r1, #0x3
ASM_ASSERT(eq)
#endif
add r2, r0, r1
mov r1, #0
z_loop:
cmp r2, r0
beq z_end
str r1, [r0], #4
b z_loop
z_end:
bx lr
endfunc zeromem
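A quick usage sketch for zeromem, honouring the 4-byte alignment contract stated in the comment above (the buffer and wrapper are illustrative):

#include <stdint.h>

void zeromem(void *mem, unsigned int length);	/* from the routine above */

static uint32_t sketch_buf[16];			/* 4-byte aligned by type */

static void sketch_clear(void)
{
	zeromem(sketch_buf, sizeof(sketch_buf));	/* length is a multiple of 4 */
}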