diff --git a/include/services/gtsi_svc.h b/include/services/gtsi_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..ca504fc8dcd483261f560775bed7e4f5e2687114 --- /dev/null +++ b/include/services/gtsi_svc.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GTSI_SVC_H +#define GTSI_SVC_H + +/* GTSI error codes. */ +#define GTSI_SUCCESS 0 +#define GTSI_ERROR_NOT_SUPPORTED -1 +#define GTSI_ERROR_INVALID_ADDRESS -2 +#define GTSI_ERROR_INVALID_PAS -3 + +/* The macros below are used to identify GTSI calls from the SMC function ID */ +#define GTSI_FNUM_MIN_VALUE U(0x100) +#define GTSI_FNUM_MAX_VALUE U(0x101) +#define is_gtsi_fid(fid) __extension__ ({ \ + __typeof__(fid) _fid = (fid); \ + ((GET_SMC_NUM(_fid) >= GTSI_FNUM_MIN_VALUE) && \ + (GET_SMC_NUM(_fid) <= GTSI_FNUM_MAX_VALUE)); }) + +/* Get GTSI fastcall std FID from function number */ +#define GTSI_FID(smc_cc, func_num) \ + ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ + ((smc_cc) << FUNCID_CC_SHIFT) | \ + (OEN_STD_START << FUNCID_OEN_SHIFT) | \ + ((func_num) << FUNCID_NUM_SHIFT)) + +#define GRAN_TRANS_TO_REALM_FNUM 0x100 +#define GRAN_TRANS_TO_NS_FNUM 0x101 + +#define SMC_ASC_MARK_REALM GTSI_FID(SMC_64, GRAN_TRANS_TO_REALM_FNUM) +#define SMC_ASC_MARK_NONSECURE GTSI_FID(SMC_64, GRAN_TRANS_TO_NS_FNUM) + +#define GRAN_TRANS_RET_BAD_ADDR -2 +#define GRAN_TRANS_RET_BAD_PAS -3 + +#endif /* GTSI_SVC_H */ diff --git a/include/services/rmi_svc.h b/include/services/rmi_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..df2243c1038d2be5d6d6699ad8a8b9078d5e3902 --- /dev/null +++ b/include/services/rmi_svc.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMI_SVC_H +#define RMI_SVC_H + +#include +#include + +/* RMI error codes. */ +#define RMI_SUCCESS 0 +#define RMI_ERROR_NOT_SUPPORTED -1 +#define RMI_ERROR_INVALID_ADDRESS -2 +#define RMI_ERROR_INVALID_PAS -3 + +/* The macros below are used to identify RMI calls from the SMC function ID */ +#define RMI_FNUM_MIN_VALUE U(0x00) +#define RMI_FNUM_MAX_VALUE U(0x20) +#define is_rmi_fid(fid) __extension__ ({ \ + __typeof__(fid) _fid = (fid); \ + ((GET_SMC_NUM(_fid) >= RMI_FNUM_MIN_VALUE) && \ + (GET_SMC_NUM(_fid) <= RMI_FNUM_MAX_VALUE) && \ + (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST) && \ + (GET_SMC_CC(_fid) == SMC_64) && \ + (GET_SMC_OEN(_fid) == OEN_ARM_START) && \ + ((_fid & 0x00FE0000) == 0U)); }) + +/* Get RMI fastcall std FID from function number */ +#define RMI_FID(smc_cc, func_num) \ + ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ + ((smc_cc) << FUNCID_CC_SHIFT) | \ + (OEN_ARM_START << FUNCID_OEN_SHIFT) | \ + ((func_num) << FUNCID_NUM_SHIFT)) + +/* + * SMC_RMM_INIT_COMPLETE is the only function in the RMI that originates from + * the Realm world and is handled by the RMMD. 
The remaining functions are + * always invoked by the Normal world, forwarded by RMMD and handled by the + * RMM + */ +#define RMI_FNUM_REQ_COMPLETE U(16) +#define RMI_FNUM_VERSION_REQ U(0) + +#define RMI_FNUM_GRAN_NS_REALM U(1) +#define RMI_FNUM_GRAN_REALM_NS U(2) + +/* RMI SMC64 FIDs handled by the RMMD */ +#define RMI_RMM_REQ_COMPLETE RMI_FID(SMC_64, RMI_FNUM_REQ_COMPLETE) +#define RMI_RMM_REQ_VERSION RMI_FID(SMC_64, RMI_FNUM_VERSION_REQ) + +#define RMI_RMM_GRANULE_DELEGATE RMI_FID(SMC_64, RMI_FNUM_GRAN_NS_REALM) +#define RMI_RMM_GRANULE_UNDELEGATE RMI_FID(SMC_64, RMI_FNUM_GRAN_REALM_NS) + + +#define RMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) +#define RMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF) + +/* Reserve a special value for MBZ parameters. */ +#define RMI_PARAM_MBZ U(0x0) + +#endif /* RMI_SVC_H */ diff --git a/include/services/rmmd_svc.h b/include/services/rmmd_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..f723253bfcfa2a14737346c86617c8c2f49157f7 --- /dev/null +++ b/include/services/rmmd_svc.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMMD_SVC_H +#define RMMD_SVC_H + +#ifndef __ASSEMBLER__ +#include + +int rmmd_setup(void); +uint64_t rmmd_rmi_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +uint64_t rmmd_gtsi_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +#endif /* __ASSEMBLER__ */ + +#endif /* RMMD_SVC_H */ diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c index 37bfc62e2bb3b5276eebd72aa17f69950cc902a6..1d4423cb33bbd42247cb507267c301d9269408e0 100644 --- a/services/arm_arch_svc/arm_arch_svc_setup.c +++ b/services/arm_arch_svc/arm_arch_svc_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,9 +11,19 @@ #include #include #include +#include +#include #include #include +#if ENABLE_RME +/* Setup Arm architecture Services */ +static int32_t arm_arch_svc_setup(void) +{ + return rmmd_setup(); +} +#endif + static int32_t smccc_version(void) { return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION); @@ -133,6 +143,16 @@ static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid, SMC_RET0(handle); #endif default: +#if ENABLE_RME + /* + * RMI functions are allocated from the Arch service range. Call + * the RMM dispatcher to handle RMI calls. + */ + if (is_rmi_fid(smc_fid)) { + return rmmd_rmi_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } +#endif WARN("Unimplemented Arm Architecture Service Call: 0x%x \n", smc_fid); SMC_RET1(handle, SMC_UNK); @@ -145,6 +165,10 @@ DECLARE_RT_SVC( OEN_ARM_START, OEN_ARM_END, SMC_TYPE_FAST, +#if ENABLE_RME + arm_arch_svc_setup, +#else NULL, +#endif arm_arch_svc_smc_handler ); diff --git a/services/std_svc/rmmd/aarch64/rmmd_helpers.S b/services/std_svc/rmmd/aarch64/rmmd_helpers.S new file mode 100644 index 0000000000000000000000000000000000000000..8a7639f9f7b9f322d6f2f77515325f493db08ea2 --- /dev/null +++ b/services/std_svc/rmmd/aarch64/rmmd_helpers.S @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include "../rmmd_private.h" + + .global rmmd_rmm_enter + .global rmmd_rmm_exit + + /* --------------------------------------------------------------------- + * This function is called with SP_EL0 as stack. Here we stash our EL3 + * callee-saved registers on to the stack as a part of saving the C + * runtime and enter the secure payload. + * 'x0' contains a pointer to the memory where the address of the C + * runtime context is to be saved. + * --------------------------------------------------------------------- + */ +func rmmd_rmm_enter + /* Make space for the registers that we're going to save */ + mov x3, sp + str x3, [x0, #0] + sub sp, sp, #RMMD_C_RT_CTX_SIZE + + /* Save callee-saved registers on to the stack */ + stp x19, x20, [sp, #RMMD_C_RT_CTX_X19] + stp x21, x22, [sp, #RMMD_C_RT_CTX_X21] + stp x23, x24, [sp, #RMMD_C_RT_CTX_X23] + stp x25, x26, [sp, #RMMD_C_RT_CTX_X25] + stp x27, x28, [sp, #RMMD_C_RT_CTX_X27] + stp x29, x30, [sp, #RMMD_C_RT_CTX_X29] + + /* --------------------------------------------------------------------- + * Everything is setup now. el3_exit() will use the secure context to + * restore to the general purpose and EL3 system registers to ERET + * into the secure payload. + * --------------------------------------------------------------------- + */ + b el3_exit +endfunc rmmd_rmm_enter + + /* --------------------------------------------------------------------- + * This function is called with 'x0' pointing to a C runtime context. + * It restores the saved registers and jumps to that runtime with 'x0' + * as the new SP register. This destroys the C runtime context that had + * been built on the stack below the saved context by the caller. Later + * the second parameter 'x1' is passed as a return value to the caller. + * --------------------------------------------------------------------- + */ +func rmmd_rmm_exit + /* Restore the previous stack */ + mov sp, x0 + + /* Restore callee-saved registers on to the stack */ + ldp x19, x20, [x0, #(RMMD_C_RT_CTX_X19 - RMMD_C_RT_CTX_SIZE)] + ldp x21, x22, [x0, #(RMMD_C_RT_CTX_X21 - RMMD_C_RT_CTX_SIZE)] + ldp x23, x24, [x0, #(RMMD_C_RT_CTX_X23 - RMMD_C_RT_CTX_SIZE)] + ldp x25, x26, [x0, #(RMMD_C_RT_CTX_X25 - RMMD_C_RT_CTX_SIZE)] + ldp x27, x28, [x0, #(RMMD_C_RT_CTX_X27 - RMMD_C_RT_CTX_SIZE)] + ldp x29, x30, [x0, #(RMMD_C_RT_CTX_X29 - RMMD_C_RT_CTX_SIZE)] + + /* --------------------------------------------------------------------- + * This should take us back to the instruction after the call to the + * last rmmd_rmm_enter().* Place the second parameter to x0 + * so that the caller will see it as a return value from the original + * entry call. + * --------------------------------------------------------------------- + */ + mov x0, x1 + ret +endfunc rmmd_rmm_exit diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk new file mode 100644 index 0000000000000000000000000000000000000000..2b2d3c2b47d0ab6d571de8a9bcaf3436fab2f91f --- /dev/null +++ b/services/std_svc/rmmd/rmmd.mk @@ -0,0 +1,27 @@ +# +# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +ifneq (${ARCH},aarch64) + $(error "Error: RMMD is only supported on aarch64.") +endif + +# In cases where an associated Realm Payload lies outside this build +# system/source tree, then the dispatcher Makefile can either invoke an external +# build command or assume it pre-built + +BL32_ROOT := bl32/trp + +# Include SP's Makefile. 
The assumption is that the TRP's build system is +# compatible with that of Trusted Firmware, and it'll add and populate necessary +# build targets and variables +include ${BL32_ROOT}/trp.mk + +RMMD_SOURCES += $(addprefix services/std_svc/rmmd/, \ + ${ARCH}/rmmd_helpers.S \ + rmmd_main.c) + +# Let the top-level Makefile know that we intend to include a BL32 image +NEED_BL32 := yes diff --git a/services/std_svc/rmmd/rmmd_initial_context.h b/services/std_svc/rmmd/rmmd_initial_context.h new file mode 100644 index 0000000000000000000000000000000000000000..dc026e4c0933846cbae69126fe2308181367be89 --- /dev/null +++ b/services/std_svc/rmmd/rmmd_initial_context.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMMD_INITIAL_CONTEXT_H +#define RMMD_INITIAL_CONTEXT_H + +#include + +#define NS_SCTLR_EL2 NS_SCTLR_EL2_RES1 + +#define REALM_ACTLR_EL2 0x0 +#define REALM_AFSR0_EL2 0x0 +#define REALM_AFSR1_EL2 0x0 +#define REALM_AMAIR_EL2 0x0 + +#define REALM_CNTHCTL_EL2 (CNTHCTL_EL0PCTEN_BIT | CNTHCTL_EL0VCTEN_BIT) + +#define REALM_CNTVOFF_EL2 0x0 + +/* + * CPTR_EL2 + * ZEN=1 Trap at EL0 but not at EL2 + * FPEN=1 Trap at EL0 but not at EL2 + * TTA=1 Trap trace access + * TA=1 Trap AMU access + */ +#define REALM_CPTR_EL2 ( \ + CPTR_EL2_ZEN_DISABLE_EL0 | \ + CPTR_EL2_FPEN_DISABLE_EL0 | \ + CPTR_EL2_TTA_BIT | \ + CPTR_EL2_TAM_BIT | \ + CPTR_EL2_RES1 \ + ) + +#define REALM_HACR_EL2 0x0 + +/* + * HCR_EL2 + * TGE=1 + * E2H=1 + * TEA=1 + */ +#define REALM_HCR_EL2 ( \ + HCR_TGE_BIT | \ + HCR_E2H_BIT | \ + HCR_TEA_BIT \ + ) + +#define NS_HCR_EL2 ( \ + HCR_API_BIT | \ + HCR_APK_BIT | \ + HCR_RW_BIT \ + ) + +/* Attr0=0xFF indicates normal inner/outer write-back non-transient memory */ +#define REALM_MAIR_EL2 (0xFF) + +#define REALM_MDCR_EL2 ( \ + MDCR_EL2_TPMCR_BIT | \ + MDCR_EL2_TPM_BIT | \ + MDCR_EL2_TDA_BIT \ + ) + +#define REALM_MPAM_EL2 0x0 /* Only if MPAM is present */ + +#define REALM_MPAMHCR_EL2 0x0 /* Only if MPAM is present */ + +#define REALM_PMSCR_EL2 0x0 /* Only if SPE is present */ + +/* + * SCTLR_EL2 + * M=0 MMU disabled. + * A=0 Alignment checks disabled. + * C=1 Data accesses are cacheable as per translation tables. + * SA=1 SP aligned at EL2. + * SA0=1 SP alignment check enable for EL0. + * CP15BEN=0 EL0 use of CP15xxx instructions UNDEFINED. + * NAA=0 Unaligned MA fault at EL2 and EL0. + * ITD=0 (A32 only) + * SED=1 A32 only, RES1 for non-A32 systems. + * EOS=0 ARMv8.5-CSEH, otherwise RES1. + * I=1 I$ is on for EL2 and EL0. + * DZE=1 Do not trap DC ZVA. + * UCT=1 Allow EL0 access to CTR_EL0. + * NTWI=1 Don't trap WFI from EL0 to EL2. + * NTWE=1 Don't trap WFE from EL0 to EL2. + * WXN=1 W implies XN. + * TSCXT=1 Trap EL0 accesses to SCXTNUM_EL0. + * EIS=0 EL2 exception is context synchronizing. + * SPAN=0 Set PSTATE.PAN=1 on exceptions to EL2. + * UCI=1 Allow cache maintenance instructions at EL0. + * nTLSMD=1 (A32/T32 only) + * LSMAOE=1 (A32/T32 only) + */ +#define REALM_SCTLR_EL2 ( \ + SCTLR_C_BIT | \ + SCTLR_SA_BIT | \ + SCTLR_SA0_BIT | \ + SCTLR_SED_BIT | \ + SCTLR_I_BIT | \ + SCTLR_DZE_BIT | \ + SCTLR_UCT_BIT | \ + SCTLR_NTWI_BIT | \ + SCTLR_NTWE_BIT | \ + SCTLR_WXN_BIT | \ + SCTLR_TSCXT_BIT | \ + SCTLR_UCI_BIT | \ + SCTLR_nTLSMD_BIT | \ + SCTLR_LSMAOE_BIT | \ + (U(1) << 22) | \ + (U(1) << 11) \ + ) + +#define REALM_SCXTNUM_EL2 0x0 + +/* + * SPSR_EL2 + * M=0x9 (0b1001 EL2h) + * M[4]=0 + * DAIF=0xF Exceptions masked on entry. + * BTYPE=0 BTI not yet supported. 
+ * SSBS=0 Not yet supported. + * IL=0 Not an illegal exception return. + * SS=0 Not single stepping. + * PAN=1 RMM shouldn't access realm memory. + * UAO=0 + * DIT=0 + * TCO=0 + * NZCV=0 + */ +#define REALM_SPSR_EL2 ( \ + SPSR_M_EL2H | \ + (0xF << SPSR_DAIF_SHIFT) | \ + SPSR_PAN_BIT \ + ) + +/* + * TCR_EL2 + * T0SZ=16 VA range 48 bits. + * EPD0=1 TTBR0_EL2 disabled. + * IRGN=0b01 Normal, inner wb ra wa cacheable. + * ORGN=0b01 Normal, outer wb ra wa cacheable. + * SH=0b11 Inner shareable. + * TG0=0 4k pages. + * T1SZ=28 36 bit VA. + * A1=0 TTBR0_EL2.ASID defines the ASID. + * EPD1=0 TTBR1_EL2 not disabled. + * IRGN1=0b01 Normal, inner wb ra wa cacheable. + * ORGN1=0b01 Normal, outer wb ra wa cacheable. + * SH1=0b11 Inner shareable. + * TG1=0b10 4k pages. + * IPS=0b001 36 bits. + * AS=1 16 bit ASID. + * TBI0=0 Top byte used for address calc. + * TBI1=0 Same. + * HA=0 Hardware access flag update disabled. + * HD=0 Hardware management of dirty flag disabled. + * HPD0=1 Hierarchical permissions disabled. + * HPD1=1 Same. + * HWU0xx Hardware cannot use bits 59-62 in page tables. + * HWU1xx Hardware cannot use bits 59-62 in page tables. + * TBID0=0 Pointer auth not enabled. + * TBID1=0 Pointer auth not enabled. + * NFD0=0 SVE not enabled, do not disable stage 1 trans using TTBR0_EL2. + * NFD1=0 Same but TTBR1_EL2. + * E0PD0=0 EL0 access to anything translated by TTBR0_EL2 will not fault. + * E0PD0=0 Same but TTBR1_EL2. + * TCCMA0=0 Memory tagging not enabled. + * TCCMA1=0 Same. + */ +#define REALM_TCR_EL2 ( \ + (16UL << TCR_T0SZ_SHIFT) | \ + TCR_EPD0_BIT | \ + TCR_RGN_INNER_WBA | \ + TCR_RGN_OUTER_WBA | \ + TCR_SH_INNER_SHAREABLE | \ + (28UL << TCR_T1SZ_SHIFT) | \ + TCR_RGN1_INNER_WBA | \ + TCR_RGN1_OUTER_WBA | \ + TCR_SH1_INNER_SHAREABLE | \ + TCR_TG1_4K | \ + (1UL << TCR_EL2_IPS_SHIFT) | \ + TCR_AS_BIT | \ + TCR_HPD0_BIT | \ + TCR_HPD1_BIT \ + ) + +#define REALM_TRFCR_EL2 0x0 + +#define REALM_TTBR0_EL2 0x0 + +#endif /* RMMD_INITIAL_CONTEXT_H */ diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c new file mode 100644 index 0000000000000000000000000000000000000000..3fbc2c40ae3a30a0ba14e1d2c826f4a376fdc3e9 --- /dev/null +++ b/services/std_svc/rmmd/rmmd_main.c @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rmmd_initial_context.h" +#include "rmmd_private.h" + +/******************************************************************************* + * RMM context information. + ******************************************************************************/ +rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * RMM entry point information. Discovered on the primary core and reused + * on secondary cores. + ******************************************************************************/ +static entry_point_info_t *rmm_ep_info; + +/******************************************************************************* + * Static function declaration. 
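+ * rmm_init() is the deferred BL32 init hook registered from rmmd_setup()
+ * below; rmmd_smc_forward() performs the Normal <-> Realm world switch used
+ * by rmmd_rmi_handler().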
+ ******************************************************************************/ +static int32_t rmm_init(void); +static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state, + uint32_t dst_sec_state, uint64_t x1, + uint64_t x2, uint64_t x3, uint64_t x4, + void *handle); + +/******************************************************************************* + * This function takes an RMM context pointer and performs a synchronous entry + * into it. + ******************************************************************************/ +uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx) +{ + uint64_t rc; + + assert(rmm_ctx != NULL); + + cm_set_context(&(rmm_ctx->cpu_ctx), REALM); + + /* Save the current el1/el2 context before loading realm context. */ + cm_el1_sysregs_context_save(NON_SECURE); + cm_el2_sysregs_context_save(NON_SECURE); + + /* Restore the realm context assigned above */ + cm_el1_sysregs_context_restore(REALM); + cm_el2_sysregs_context_restore(REALM); + cm_set_next_eret_context(REALM); + + /* Enter RMM */ + rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx); + + /* Save realm context */ + cm_el1_sysregs_context_save(REALM); + cm_el2_sysregs_context_save(REALM); + + /* Restore the el1/el2 context again. */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_el2_sysregs_context_restore(NON_SECURE); + + return rc; +} + +/******************************************************************************* + * This function returns to the place where rmmd_rmm_sync_entry() was + * called originally. + ******************************************************************************/ +__dead2 void rmmd_rmm_sync_exit(uint64_t rc) +{ + rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; + + /* Get context of the RMM in use by this CPU. */ + assert(cm_get_context(REALM) == &(ctx->cpu_ctx)); + + /* + * The RMMD must have initiated the original request through a + * synchronous entry into RMM. Jump back to the original C runtime + * context with the value of rc in x0; + */ + rmmd_rmm_exit(ctx->c_rt_ctx, rc); + + panic(); +} + +static void rmm_el2_context_init(el2_sysregs_t *regs) +{ + /* + * el2_sysregs_t is just a structure containing an array of uint64_t named + * named ctx_regs of size CTX_EL2_SYSREGS_ALL. + * + * typedef struct { + * uint64_t ctx_regs[CTX_EL2_SYSREGS_ALL]; + * } el2_sysregs_t; + */ + regs->ctx_regs[CTX_ACTLR_EL2 >> 3] = REALM_ACTLR_EL2; + regs->ctx_regs[CTX_AFSR0_EL2 >> 3] = REALM_AFSR0_EL2; + regs->ctx_regs[CTX_AFSR1_EL2 >> 3] = REALM_AFSR1_EL2; + regs->ctx_regs[CTX_AMAIR_EL2 >> 3] = REALM_AMAIR_EL2; + regs->ctx_regs[CTX_CNTHCTL_EL2 >> 3] = REALM_CNTHCTL_EL2; + regs->ctx_regs[CTX_CNTVOFF_EL2 >> 3] = REALM_CNTVOFF_EL2; + regs->ctx_regs[CTX_CPTR_EL2 >> 3] = REALM_CPTR_EL2; + regs->ctx_regs[CTX_HACR_EL2 >> 3] = REALM_HACR_EL2; + regs->ctx_regs[CTX_HCR_EL2 >> 3] = REALM_HCR_EL2; + regs->ctx_regs[CTX_MAIR_EL2 >> 3] = REALM_MAIR_EL2; + regs->ctx_regs[CTX_MDCR_EL2 >> 3] = REALM_MDCR_EL2; + regs->ctx_regs[CTX_PMSCR_EL2 >> 3] = REALM_PMSCR_EL2; + regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = REALM_SCTLR_EL2; + regs->ctx_regs[CTX_SCXTNUM_EL2 >> 3] = REALM_SCXTNUM_EL2; + regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2; + regs->ctx_regs[CTX_TCR_EL2 >> 3] = REALM_TCR_EL2; + +#if ENABLE_MPAM_FOR_LOWER_ELS + regs->ctx_regs[CTX_MPAM2_EL2 >> 3] = REALM_MPAM_EL2; + regs->ctx_regs[CTX_MPAMHCR_EL2 >> 3] = REALM_MPAMHCR_EL2; +#endif +} + +/******************************************************************************* + * Jump to the RMM for the first time. 
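+ * This runs as the deferred BL32 init step: rmmd_setup() registers rmm_init()
+ * with bl31_register_bl32_init(). The synchronous entry below only returns
+ * once the RMM signals boot completion with RMI_RMM_REQ_COMPLETE, which
+ * rmmd_rmi_handler() turns into rmmd_rmm_sync_exit() while this context is
+ * still in RMM_STATE_RESET.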
+ ******************************************************************************/ +static int32_t rmm_init(void) +{ + + uint64_t rc; + + rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; + + INFO("RMM init start.\n"); + ctx->state = RMM_STATE_RESET; + + /* Initialize RMM EL2 context. */ + rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); + + rc = rmmd_rmm_sync_entry(ctx); + if (rc != 0ULL) { + ERROR("RMM initialisation failed 0x%llx\n", rc); + panic(); + } + + ctx->state = RMM_STATE_IDLE; + INFO("RMM init end.\n"); + + return 1; +} + +/******************************************************************************* + * Load and read RMM manifest, setup RMM. + ******************************************************************************/ +int rmmd_setup(void) +{ + uint32_t ep_attr; + unsigned int linear_id = plat_my_core_pos(); + rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id]; + + /* Make sure RME is supported. */ + assert(get_armv9_2_feat_rme_support() != 0U); + + rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM); + if (rmm_ep_info == NULL) { + WARN("No RMM image provided by BL2 boot loader, Booting " + "device without RMM initialization. SMCs destined for " + "RMM will return SMC_UNK\n"); + return -ENOENT; + } + + /* Under no circumstances will this parameter be 0 */ + assert(rmm_ep_info->pc == BL32_BASE); + + /* Initialise an entrypoint to set up the CPU context */ + ep_attr = REALM; + if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) { + ep_attr |= EP_EE_BIG; + } + + SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr); + rmm_ep_info->spsr = SPSR_64(MODE_EL2, + MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + + /* Initialise RMM context with this entry point information */ + cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info); + + /* Register power management hooks with PSCI */ + psci_register_spd_pm_hook(&rmmd_pm); + + INFO("RMM setup done.\n"); + + + + /* Register init function for deferred init. */ + bl31_register_bl32_init(&rmm_init); + + return 0; +} + +/******************************************************************************* + * Forward SMC to the other security state + ******************************************************************************/ +static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state, + uint32_t dst_sec_state, uint64_t x1, + uint64_t x2, uint64_t x3, uint64_t x4, + void *handle) +{ + /* Save incoming security state */ + cm_el1_sysregs_context_save(src_sec_state); + cm_el2_sysregs_context_save(src_sec_state); + + /* Restore outgoing security state */ + cm_el1_sysregs_context_restore(dst_sec_state); + cm_el2_sysregs_context_restore(dst_sec_state); + cm_set_next_eret_context(dst_sec_state); + + SMC_RET8(cm_get_context(dst_sec_state), smc_fid, x1, x2, x3, x4, + SMC_GET_GP(handle, CTX_GPREG_X5), + SMC_GET_GP(handle, CTX_GPREG_X6), + SMC_GET_GP(handle, CTX_GPREG_X7)); +} + +/******************************************************************************* + * This function handles all SMCs in the range reserved for RMI. 
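+ * For illustration only (the register usage is assumed here, not defined by
+ * this patch), a Normal world caller might issue:
+ *     x0 = RMI_RMM_GRANULE_DELEGATE (0xC0000001, fast SMC64, Arm Arch range)
+ *     x1 = PA of the granule to delegate
+ *     smc #0
+ * and the forwarding path below passes x0-x7 to the RMM unchanged.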
Each call is + * either forwarded to the other security state or handled by the RMM dispatcher + ******************************************************************************/ +uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, + uint64_t x3, uint64_t x4, void *cookie, + void *handle, uint64_t flags) +{ + rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; + uint32_t src_sec_state; + + /* Determine which security state this SMC originated from */ + src_sec_state = caller_sec_state(flags); + + /* RMI must not be invoked by the Secure world */ + if (src_sec_state == SMC_FROM_SECURE) { + WARN("RMM: RMI invoked by secure world.\n"); + SMC_RET1(handle, SMC_UNK); + } + + /* + * Forward an RMI call from the Normal world to the Realm world as it + * is. + */ + if (src_sec_state == SMC_FROM_NON_SECURE) { + VERBOSE("RMM: RMI call from non-secure world.\n"); + return rmmd_smc_forward(smc_fid, NON_SECURE, REALM, + x1, x2, x3, x4, handle); + } + + assert(src_sec_state == SMC_FROM_REALM); + + switch (smc_fid) { + case RMI_RMM_REQ_COMPLETE: + if (ctx->state == RMM_STATE_RESET) { + VERBOSE("RMM: running rmmd_rmm_sync_exit\n"); + rmmd_rmm_sync_exit(x1); + } + + return rmmd_smc_forward(x1, REALM, NON_SECURE, + x2, x3, x4, 0, handle); + + default: + WARN("RMM: Unsupported RMM call 0x%08x\n", smc_fid); + SMC_RET1(handle, SMC_UNK); + } +} + +/******************************************************************************* + * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM + * is done after initialising minimal architectural state that guarantees safe + * execution. + ******************************************************************************/ +static void rmmd_cpu_on_finish_handler(u_register_t unused) +{ + int32_t rc; + uint32_t linear_id = plat_my_core_pos(); + rmmd_rmm_context_t *ctx = &rmm_context[linear_id]; + + ctx->state = RMM_STATE_RESET; + + /* Initialise RMM context with this entry point information */ + cm_setup_context(&ctx->cpu_ctx, rmm_ep_info); + + /* Initialize RMM EL2 context. */ + rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); + + rc = rmmd_rmm_sync_entry(ctx); + if (rc != 0) { + ERROR("RMM initialisation failed (%d) on CPU%d\n", rc, + linear_id); + panic(); + } + + ctx->state = RMM_STATE_IDLE; +} + +/******************************************************************************* + * Structure populated by the RMM Dispatcher to perform any bookkeeping before + * PSCI executes a power mgmt. operation. + ******************************************************************************/ +const spd_pm_ops_t rmmd_pm = { + .svc_on_finish = rmmd_cpu_on_finish_handler, +}; + +static int gtsi_transition_granule(uint64_t pa, + unsigned int src_sec_state, + unsigned int target_pas) +{ + int ret; + + ret = gpt_transition_pas(pa, src_sec_state, target_pas); + + /* Convert TF-A error codes into GTSI error codes */ + if (ret == -EINVAL) { + ret = GRAN_TRANS_RET_BAD_ADDR; + } else if (ret == -EPERM) { + ret = GRAN_TRANS_RET_BAD_PAS; + } + + return ret; +} + +/******************************************************************************* + * This function handles all SMCs in the range reserved for GTF. 
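+ * The RMM requests a PAS change for a single granule, for example:
+ *     x0 = SMC_ASC_MARK_REALM (0xC4000100) or SMC_ASC_MARK_NONSECURE (0xC4000101)
+ *     x1 = PA of the granule
+ *     smc #0
+ * EL3 updates the Granule Protection Table entry via gpt_transition_pas() and
+ * maps -EINVAL/-EPERM onto GRAN_TRANS_RET_BAD_ADDR/GRAN_TRANS_RET_BAD_PAS.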
+ ******************************************************************************/ +uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, + uint64_t x3, uint64_t x4, void *cookie, + void *handle, uint64_t flags) +{ + uint32_t src_sec_state; + + /* Determine which security state this SMC originated from */ + src_sec_state = caller_sec_state(flags); + + if (src_sec_state != SMC_FROM_REALM) { + WARN("RMM: GTF call originated from secure or normal world\n"); + SMC_RET1(handle, SMC_UNK); + } + + switch (smc_fid) { + case SMC_ASC_MARK_REALM: + SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, + GPI_REALM)); + case SMC_ASC_MARK_NONSECURE: + SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, + GPI_NS)); + default: + WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid); + SMC_RET1(handle, SMC_UNK); + } +} diff --git a/services/std_svc/rmmd/rmmd_private.h b/services/std_svc/rmmd/rmmd_private.h new file mode 100644 index 0000000000000000000000000000000000000000..fc24cac0b984b979cac64e04d531cb68c6fffd48 --- /dev/null +++ b/services/std_svc/rmmd/rmmd_private.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMMD_PRIVATE_H +#define RMMD_PRIVATE_H + +#include + +/******************************************************************************* + * Constants that allow assembler code to preserve callee-saved registers of the + * C runtime context while performing a security state switch. + ******************************************************************************/ +#define RMMD_C_RT_CTX_X19 0x0 +#define RMMD_C_RT_CTX_X20 0x8 +#define RMMD_C_RT_CTX_X21 0x10 +#define RMMD_C_RT_CTX_X22 0x18 +#define RMMD_C_RT_CTX_X23 0x20 +#define RMMD_C_RT_CTX_X24 0x28 +#define RMMD_C_RT_CTX_X25 0x30 +#define RMMD_C_RT_CTX_X26 0x38 +#define RMMD_C_RT_CTX_X27 0x40 +#define RMMD_C_RT_CTX_X28 0x48 +#define RMMD_C_RT_CTX_X29 0x50 +#define RMMD_C_RT_CTX_X30 0x58 + +#define RMMD_C_RT_CTX_SIZE 0x60 +#define RMMD_C_RT_CTX_ENTRIES (RMMD_C_RT_CTX_SIZE >> DWORD_SHIFT) + +#ifndef __ASSEMBLER__ +#include +#include + +typedef enum rmm_state { + RMM_STATE_RESET = 0, + RMM_STATE_IDLE +} rmm_state_t; + +/* + * Data structure used by the RMM dispatcher (RMMD) in EL3 to track context of + * the RMM at R-EL2. + */ +typedef struct rmmd_rmm_context { + uint64_t c_rt_ctx; + cpu_context_t cpu_ctx; + rmm_state_t state; +} rmmd_rmm_context_t; + +/* Functions used to enter/exit the RMM synchronously */ +uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *ctx); +__dead2 void rmmd_rmm_sync_exit(uint64_t rc); + +/* Assembly helpers */ +uint64_t rmmd_rmm_enter(uint64_t *c_rt_ctx); +void __dead2 rmmd_rmm_exit(uint64_t c_rt_ctx, uint64_t ret); + +/* Reference to PM ops for the RMMD */ +extern const spd_pm_ops_t rmmd_pm; + +#endif /* __ASSEMBLER__ */ + +#endif /* RMMD_PRIVATE_H */ diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c index 23f13ab823a3ed276e1ad38ef98af59c3805b665..d82da9fcaa500315207d7df851006e7de6a7a7b1 100644 --- a/services/std_svc/std_svc_setup.c +++ b/services/std_svc/std_svc_setup.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include @@ -148,6 +150,16 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid, flags); } #endif +#if ENABLE_RME + /* + * Granule transition service interface functions (GTSI) are allocated + * from the Std service range. Call the RMM dispatcher to handle calls. 
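+	 * These correspond to the fast SMC64 FIDs 0xC4000100 (SMC_ASC_MARK_REALM)
+	 * and 0xC4000101 (SMC_ASC_MARK_NONSECURE); rmmd_gtsi_handler() rejects
+	 * callers that are not the Realm world.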
+ */ + if (is_gtsi_fid(smc_fid)) { + return rmmd_gtsi_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } +#endif switch (smc_fid) { case ARM_STD_SVC_CALL_COUNT: