Commit 37a12f04 authored by Julius Werner's avatar Julius Werner Committed by TrustedFirmware Code Review
Browse files

Merge "sc7180 platform support" into integration

parents 8ae3a91c 5bd9c17d
......@@ -446,6 +446,15 @@ QEMU platform port
:F: docs/plat/qemu.rst
:F: plat/qemu/
QTI platform port
^^^^^^^^^^^^^^^^^
:M: Saurabh Gorecha <sgorecha@codeaurora.org>
:G: `sgorecha`_
:M: Debasish Mandal <dmandal@codeaurora.org>
:M: QTI TF Maintainers <qti.trustedfirmware.maintainers@codeaurora.org>
:F: docs/plat/qti.rst
:F: plat/qti/
Raspberry Pi 3 platform port
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:M: Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>
......@@ -614,6 +623,7 @@ Build system
.. _remi-triplefault: https://github.com/repk
.. _rockchip-linux: https://github.com/rockchip-linux
.. _sandrine-bailleux-arm: https://github.com/sandrine-bailleux-arm
.. _sgorecha: https://github.com/sgorecha
.. _shawnguo2: https://github.com/shawnguo2
.. _sivadur: https://github.com/sivadur
.. _smaeul: https://github.com/smaeul
......
......@@ -28,6 +28,7 @@ Platform Ports
poplar
qemu
qemu-sbsa
qti
rpi3
rpi4
rcar-gen3
......
Qualcomm Technologies, Inc.
===========================
Trusted Firmware-A (TF-A) implements the EL3 firmware layer for QTI SC7180.
Boot Trace
-------------
Bootrom --> BL1/BL2 --> BL31 --> BL33 --> Linux kernel
BL1/BL2 and BL33 are currently supplied by Coreboot + Depthcharge.
How to build
------------
Code Locations
~~~~~~~~~~~~~~
- Trusted Firmware-A:
`link <https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git>`__
Build Procedure
~~~~~~~~~~~~~~~
QTI SoCs expect TF-A's BL31 to be integrated with other boot software,
Coreboot, so only bl31.elf needs to be built from the TF-A repository.
The build command looks like:
make CROSS_COMPILE=aarch64-linux-gnu- PLAT=sc7180 COREBOOT=1
Update the value of the CROSS_COMPILE argument to match your cross-compilation toolchain.
Additionally, QTISECLIB_PATH=<path to qtiseclib> can be added to the build command.
If QTISECLIB_PATH is not given on the build command line, a stub implementation
of qtiseclib is picked. qtiseclib with the stub implementation does not boot the
device; the stub exists only to satisfy compilation.
QTISECLIB for SC7180 is available at
`link <https://review.coreboot.org/cgit/qc_blobs.git/plain/sc7180/qtiseclib/libqtisec.a>`__
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018,2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __PLAT_MACROS_S__
#define __PLAT_MACROS_S__
#include <drivers/arm/gic_common.h>
#include <drivers/arm/gicv2.h>
#include <drivers/arm/gicv3.h>
#include <platform_def.h>
/*
 * Crash-reporting support data: NUL-terminated register-name lists and
 * formatting strings consumed below by str_in_crash_buf_print and
 * asm_print_str.
 */
.section .rodata.gic_reg_name, "aS"
/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
gicc_regs:
	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
/* Applicable only to GICv3 with SRE enabled */
icc_regs:
	.asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""
/* Registers common to both GICv2 and GICv3 */
gicd_pend_reg:
	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n" \
		" Offset:\t\t\tvalue\n"
newline:
	.asciz "\n"
spacer:
	.asciz ":\t\t0x"

/** Macro : plat_crash_print_regs
 * This macro allows the crash reporting routine to print GIC registers
 * in case of an unhandled exception in BL31. This aids in debugging and
 * this macro can be defined to be empty in case GIC register reporting is
 * not desired.
 * The below required platform porting macro
 * prints out relevant GIC registers whenever an
 * unhandled exception is taken in BL31.
 * Clobbers: x0 - x10, x26, x27, sp
 * ---------------------------------------------
 */
.macro plat_crash_print_regs
print_gic_regs:
	ldr x26, =QTI_GICD_BASE
	ldr x27, =QTI_GICC_BASE
	/* Check for GICv3 system register access */
	mrs x7, id_aa64pfr0_el1
	ubfx x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
	/* ID_AA64PFR0_EL1.GIC == 1 means GICv3 system registers implemented */
	cmp x7, #1
	b.ne print_gicv2
	/* Check for SRE enable */
	mrs x8, ICC_SRE_EL3
	tst x8, #ICC_SRE_SRE_BIT
	b.eq print_gicv2
	/* GICv3 + SRE: dump the system-register view of the CPU interface. */
	/* Load the icc reg list to x6 */
	adr x6, icc_regs
	/* Load the icc regs to gp regs used by str_in_crash_buf_print */
	mrs x8, ICC_HPPIR0_EL1
	mrs x9, ICC_HPPIR1_EL1
	mrs x10, ICC_CTLR_EL3
	/* Store to the crash buf and print to console */
	bl str_in_crash_buf_print
	b print_gic_common
print_gicv2:
	/* GICv2 (or GICv3 legacy mode): use the memory-mapped CPU interface. */
	/* Load the gicc reg list to x6 */
	adr x6, gicc_regs
	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
	ldr w8, [x27, #GICC_HPPIR]
	ldr w9, [x27, #GICC_AHPPIR]
	ldr w10, [x27, #GICC_CTLR]
	/* Store to the crash buf and print to console */
	bl str_in_crash_buf_print
print_gic_common:
	/* Print the GICD_ISPENDR regs */
	add x7, x26, #GICD_ISPENDR
	adr x4, gicd_pend_reg
	bl asm_print_str
gicd_ispendr_loop:
	/* x4 = byte offset of the current word from the GICD base */
	sub x4, x7, x26
	/* Stop after offset 0x278 (the header string documents this range). */
	cmp x4, #0x280
	b.eq exit_print_gic_regs
	bl asm_print_hex
	adr x4, spacer
	bl asm_print_str
	/* Print the pending-register value, then advance to the next word. */
	ldr x4, [x7], #8
	bl asm_print_hex
	adr x4, newline
	bl asm_print_str
	b gicd_ispendr_loop
exit_print_gic_regs:
.endm
#endif /* __PLAT_MACROS_S__ */
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef QTI_BOARD_DEF_H
#define QTI_BOARD_DEF_H
/*
 * Required platform porting definitions common to all ARM
 * development platforms
 */

/* Size of cacheable stacks (4 KiB). */
#define PLATFORM_STACK_SIZE 0x1000

/*
 * PLAT_QTI_MMAP_ENTRIES depends on the number of entries in the
 * plat_qti_mmap array defined for each BL stage.
 */
#define PLAT_QTI_MMAP_ENTRIES 12

/*
 * Platform specific page table and MMU setup constants
 */
#define MAX_XLAT_TABLES 12
#endif /* QTI_BOARD_DEF_H */
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef QTI_CPU_H
#define QTI_CPU_H
/*
 * MIDR_EL1 values used by declare_cpu_ops to match the CPU ops for the
 * two Kryo-4xx core types (implementer field 0x51 is 'Q', Qualcomm).
 */
/* KRYO-4xx Gold MIDR */
#define QTI_KRYO4_GOLD_MIDR 0x517F804D
/* KRYO-4xx Silver MIDR */
#define QTI_KRYO4_SILVER_MIDR 0x517F805D
#endif /* QTI_CPU_H */
/*
* Copyright (c) 2018,2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef QTI_INTERRUPT_SVC_H
#define QTI_INTERRUPT_SVC_H
/*
 * Register the platform's EL3 interrupt handler with the interrupt
 * management framework. Returns 0 on success.
 */
int qti_interrupt_svc_init(void);
#endif /* QTI_INTERRUPT_SVC_H */
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef QTI_PLAT_H
#define QTI_PLAT_H
#include <stdint.h>
#include <common/bl_common.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
/*
 * Utility functions common to QTI platforms
 */

/*
 * Map/unmap a dynamic region. The region is page-aligned internally and
 * identity-mapped (VA == PA). Returns 0 on success, a negative errno
 * otherwise.
 */
int qti_mmap_add_dynamic_region(uintptr_t base_pa, size_t size,
				unsigned int attr);
int qti_mmap_remove_dynamic_region(uintptr_t base_va, size_t size);

/*
 * Utility functions common to ARM standard platforms
 */

/* Build the BL31 translation tables for the given image extents. */
void qti_setup_page_tables(uintptr_t total_base,
			   size_t total_size,
			   uintptr_t code_start,
			   uintptr_t code_limit,
			   uintptr_t rodata_start,
			   uintptr_t rodata_limit,
			   uintptr_t coh_start, uintptr_t coh_limit);

/*
 * Mandatory functions required in ARM standard platforms
 */
void plat_qti_gic_driver_init(void);
void plat_qti_gic_init(void);
void plat_qti_gic_cpuif_enable(void);
void plat_qti_gic_cpuif_disable(void);
void plat_qti_gic_pcpu_init(void);

/*
 * Optional functions required in ARM standard platforms
 */
unsigned int plat_qti_core_pos_by_mpidr(u_register_t mpidr);
unsigned int plat_qti_my_cluster_pos(void);

/* Adjust the routing mode/target of a shared peripheral interrupt. */
void gic_set_spi_routing(unsigned int id, unsigned int irm, u_register_t mpidr);
#endif /* QTI_PLAT_H */
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef QTI_UART_CONSOLE_H
#define QTI_UART_CONSOLE_H
#include <drivers/console.h>
#ifndef __ASSEMBLER__
/*
 * Register a GENI UART console instance with the console framework.
 * Returns 1 on success (see the assembly implementation).
 */
int qti_console_uart_register(console_t *console, uintptr_t uart_base_addr);
#endif /* __ASSEMBLER__ */
#endif /* QTI_UART_CONSOLE_H */
/*
* Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018,2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <drivers/arm/gicv2.h>
#include <drivers/arm/gicv3.h>
#include <drivers/console.h>
#include <platform_def.h>
.globl plat_my_core_pos
.globl plat_qti_core_pos_by_mpidr
.globl plat_reset_handler
.globl plat_panic_handler
/* -----------------------------------------------------
 * unsigned int plat_qti_core_pos_by_mpidr(uint64_t mpidr)
 * Helper function to calculate the core position.
 * With this function:
 * CorePos = (ClusterId * 4) + CoreId
 * - In ARM v8 (MPIDR_EL1[24]=0)
 * ClusterId = MPIDR_EL1[15:8]
 * CoreId = MPIDR_EL1[7:0]
 * - In ARM v8.1 (MPIDR_EL1[24]=1)
 * ClusterId = MPIDR_EL1[23:15]
 * CoreId = MPIDR_EL1[15:8]
 * Clobbers: x0 & x1.
 * -----------------------------------------------------
 */
func plat_qti_core_pos_by_mpidr
	/*
	 * NOTE(review): the MT bit is sampled from the *calling* core's
	 * MPIDR_EL1, not from the x0 argument. That is only correct if all
	 * cores in the system agree on MT -- confirm for this platform.
	 */
	mrs x1, mpidr_el1
	tst x1, #MPIDR_MT_MASK
	beq plat_qti_core_pos_by_mpidr_no_mt
	/* Right shift mpidr by one affinity level when MT=1. */
	lsr x0, x0, #MPIDR_AFFINITY_BITS
plat_qti_core_pos_by_mpidr_no_mt:
	/* x1 = CoreId (Aff0), x0 = ClusterId field (bits 15:8). */
	and x1, x0, #MPIDR_CPU_MASK
	and x0, x0, #MPIDR_CLUSTER_MASK
	/* CorePos = CoreId + ClusterId * 4 (field >> 8, then << 2 == >> 6). */
	add x0, x1, x0, LSR #6
	ret
endfunc plat_qti_core_pos_by_mpidr
/* --------------------------------------------------------------------
 * void plat_panic_handler(void)
 * calls SDI and reset system
 * --------------------------------------------------------------------
 */
func plat_panic_handler
	/* Switch to SP_EL0 and re-establish this CPU's stack first, since
	 * the stack may be unusable at the point of panic. */
	msr spsel, #0
	bl plat_set_my_stack
	/* Tail-call into qtiseclib; does not return. */
	b qtiseclib_panic
endfunc plat_panic_handler
/* -----------------------------------------------------
 * unsigned int plat_my_core_pos(void)
 * This function uses the plat_qti_core_pos_by_mpidr()
 * definition to get the index of the calling CPU
 * Clobbers: x0 & x1.
 * -----------------------------------------------------
 */
func plat_my_core_pos
	mrs x0, mpidr_el1
	/* Tail-call: plat_qti_core_pos_by_mpidr returns directly to caller. */
	b plat_qti_core_pos_by_mpidr
endfunc plat_my_core_pos
/* -----------------------------------------------------
 * void plat_reset_handler(void)
 * Runs on every core out of reset; performs the CPUSS
 * boot setup in qtiseclib, serialized by a spinlock
 * since all cores may enter simultaneously.
 * NOTE(review): assumes x18 is free as scratch here.
 * -----------------------------------------------------
 */
func plat_reset_handler
	/* save the lr */
	mov x18, x30
	/* Serialize CPUSS boot setup. Multi core enter simultaneously. */
	ldr x0, =g_qti_cpuss_boot_lock
	bl spin_lock
	/* pass cold boot status. */
	ldr w0, g_qti_bl31_cold_booted
	/* Execute CPUSS boot set up on every core. */
	bl qtiseclib_cpuss_reset_asm
	ldr x0, =g_qti_cpuss_boot_lock
	bl spin_unlock
	ret x18
endfunc plat_reset_handler
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <qti_cpu.h>
.p2align 3
/* -------------------------------------------------
 * The CPU Ops reset function for Kryo-4 Gold
 * -------------------------------------------------
 */
func qti_kryo4_gold_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
	/* Install the CVE-2017-5715 (Spectre v2) workaround vector table. */
	adr x0, wa_cve_2017_5715_bpiall_vbar
	msr vbar_el3, x0
	isb
#endif
	/* Preserve the return address across the qtiseclib call. */
	mov x19, x30
	bl qtiseclib_kryo4_gold_reset_asm
	ret x19
endfunc qti_kryo4_gold_reset_func

/* ----------------------------------------------------
 * The CPU Ops core power down function for Kryo-4 Gold.
 * Intentionally empty.
 * ----------------------------------------------------
 */
func qti_kryo4_gold_core_pwr_dwn
	ret
endfunc qti_kryo4_gold_core_pwr_dwn

/* -------------------------------------------------------
 * The CPU Ops cluster power down function for Kryo-4 Gold.
 * Intentionally empty.
 * -------------------------------------------------------
 */
func qti_kryo4_gold_cluster_pwr_dwn
	ret
endfunc qti_kryo4_gold_cluster_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Kryo4 Gold. Must follow AAPCS.
 */
func qti_kryo4_gold_errata_report
	/* TODO : Need to add support. Required only for debug bl31 image.*/
	ret
endfunc qti_kryo4_gold_errata_report
#endif

/* ---------------------------------------------
 * This function provides kryo4_gold specific
 * register information for crash reporting.
 * It needs to return with x6 pointing to
 * a list of register names in ASCII and
 * x8 - x15 having values of registers to be
 * reported.
 * ---------------------------------------------
 */
.section .rodata.qti_kryo4_gold_regs, "aS"
qti_kryo4_gold_regs: /* The ASCII list of register names to be reported */
	.asciz ""

func qti_kryo4_gold_cpu_reg_dump
	/* Empty name list: no extra registers are reported for this core. */
	adr x6, qti_kryo4_gold_regs
	ret
endfunc qti_kryo4_gold_cpu_reg_dump

/* Register the Kryo-4 Gold CPU ops with the TF-A CPU framework. */
declare_cpu_ops qti_kryo4_gold, QTI_KRYO4_GOLD_MIDR, \
	qti_kryo4_gold_reset_func, \
	qti_kryo4_gold_core_pwr_dwn, \
	qti_kryo4_gold_cluster_pwr_dwn
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <qti_cpu.h>
.p2align 3
/* -------------------------------------------------
 * The CPU Ops reset function for Kryo-4 Silver
 * -------------------------------------------------
 */
func qti_kryo4_silver_reset_func
	/* Preserve the return address across the qtiseclib call. */
	mov x19, x30
	bl qtiseclib_kryo4_silver_reset_asm
	ret x19
endfunc qti_kryo4_silver_reset_func

/* ------------------------------------------------------
 * The CPU Ops core power down function for Kryo-4 Silver.
 * Intentionally empty.
 * ------------------------------------------------------
 */
func qti_kryo4_silver_core_pwr_dwn
	ret
endfunc qti_kryo4_silver_core_pwr_dwn

/* ---------------------------------------------------------
 * The CPU Ops cluster power down function for Kryo-4 Silver.
 * Intentionally empty.
 * ---------------------------------------------------------
 */
func qti_kryo4_silver_cluster_pwr_dwn
	ret
endfunc qti_kryo4_silver_cluster_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Kryo4 Silver. Must follow AAPCS.
 */
func qti_kryo4_silver_errata_report
	/* TODO : Need to add support. Required only for debug bl31 image.*/
	ret
endfunc qti_kryo4_silver_errata_report
#endif

/* ---------------------------------------------
 * This function provides kryo4_silver specific
 * register information for crash reporting.
 * It needs to return with x6 pointing to
 * a list of register names in ASCII and
 * x8 - x15 having values of registers to be
 * reported.
 * ---------------------------------------------
 */
.section .rodata.qti_kryo4_silver_regs, "aS"
qti_kryo4_silver_regs: /* The ASCII list of register names to be reported */
	.asciz ""

func qti_kryo4_silver_cpu_reg_dump
	/* Empty name list: no extra registers are reported for this core. */
	adr x6, qti_kryo4_silver_regs
	ret
endfunc qti_kryo4_silver_cpu_reg_dump

/* Register the Kryo-4 Silver CPU ops with the TF-A CPU framework. */
declare_cpu_ops qti_kryo4_silver, QTI_KRYO4_SILVER_MIDR, \
	qti_kryo4_silver_reset_func, \
	qti_kryo4_silver_core_pwr_dwn, \
	qti_kryo4_silver_cluster_pwr_dwn
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <asm_macros.S>
#include <console_macros.S>
#include <platform_def.h>
#include <qti_uart_console.h>
/*
* This driver implements console logging into a ring buffer.
*/
.globl qti_console_uart_register
/* -----------------------------------------------
 * int qti_console_uart_register(console_t *console,
 * uintptr_t uart_base_addr)
 * Registers uart console instance.
 * In: x0 - pointer to empty console_t struct
 * x1 - start address of uart block.
 * Out: x0 - 1 to indicate success
 * Clobber list: x0, x1, x14
 * -----------------------------------------------
 */
func qti_console_uart_register
	str x1, [x0, #CONSOLE_T_BASE] /* Save UART base. */
	/* Hook up console_uart_putc / console_uart_flush (no getc) and
	 * add this console to the framework's list. */
	finish_console_register uart putc=1, flush=1
endfunc qti_console_uart_register
/* -----------------------------------------------
 * int console_uart_putc(int c, console_t *console)
 * Writes a character to the UART console.
 * The character must be preserved in x0.
 * In: x0 - character to be stored
 * x1 - pointer to console_t struct
 * Clobber list: x1, x2
 * -----------------------------------------------
 */
func console_uart_putc
	/* set x1 = UART base. */
	ldr x1, [x1, #CONSOLE_T_BASE]
	/* Loop until M_GENI_CMD_ACTIVE bit not clear (previous TX done). */
1:	ldr w2, [x1, #GENI_STATUS_REG]
	and w2, w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
	cmp w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
	b.eq 1b
	/* Transmit data. */
	cmp w0, #0xA
	b.ne 3f
	/* Add '\r' when input char is '\n': send a 1-byte transfer of
	 * 0xD first, then fall through to send the '\n' itself. */
	mov w2, #0x1
	mov w0, #0xD
	str w2, [x1, #UART_TX_TRANS_LEN_REG]
	mov w2, #GENI_M_CMD_TX
	str w2, [x1, #GENI_M_CMD0_REG]
	str w0, [x1, #GENI_TX_FIFOn_REG]
	/* Restore the original '\n' so it is transmitted below. */
	mov w0, #0xA
	/* Loop until M_GENI_CMD_ACTIVE bit not clear. */
2:	ldr w2, [x1, #GENI_STATUS_REG]
	and w2, w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
	cmp w2, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
	b.eq 2b
	/* Transmit i/p data: 1-byte TX command, then push the byte. */
3:	mov w2, #0x1
	str w2, [x1, #UART_TX_TRANS_LEN_REG]
	mov w2, #GENI_M_CMD_TX
	str w2, [x1, #GENI_M_CMD0_REG]
	str w0, [x1, #GENI_TX_FIFOn_REG]
	ret
endfunc console_uart_putc
/* -----------------------------------------------
 * int console_uart_flush(console_t *console)
 * Spins until the GENI TX engine reports idle,
 * i.e. all queued characters have been sent.
 * In: x0 - pointer to console_t struct
 * Out: x0 - 0 for success
 * Clobber list: x0, x1
 * -----------------------------------------------
 */
func console_uart_flush
	/* set x0 = UART base. */
	ldr x0, [x0, #CONSOLE_T_BASE]
	/* Loop until M_GENI_CMD_ACTIVE bit not clear. */
1:	ldr w1, [x0, #GENI_STATUS_REG]
	and w1, w1, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
	cmp w1, #GENI_STATUS_M_GENI_CMD_ACTIVE_MASK
	b.eq 1b
	mov w0, #0
	ret
endfunc console_uart_flush
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/console.h>
#include <drivers/generic_delay_timer.h>
#include <lib/bl_aux_params/bl_aux_params.h>
#include <lib/coreboot.h>
#include <lib/spinlock.h>
#include <platform.h>
#include <qti_interrupt_svc.h>
#include <qti_plat.h>
#include <qti_uart_console.h>
#include <qtiseclib_interface.h>
/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl33_image_ep_info;

/*
 * Variable to hold counter frequency for the CPU's generic timer. In this
 * platform coreboot image configure counter frequency for boot core before
 * reaching TF-A.
 */
static uint64_t g_qti_cpu_cntfrq;

/*
 * Lock variable to serialize cpuss reset execution. Placed in the
 * tzfw_coherent_mem section -- presumably because plat_reset_handler takes
 * it before the MMU/caches are enabled on a booting core; confirm.
 */
spinlock_t g_qti_cpuss_boot_lock __attribute__ ((section("tzfw_coherent_mem"),
						 aligned(CACHE_WRITEBACK_GRANULE))) = {0x0};

/*
 * Variable to hold bl31 cold boot status. Default value 0x0 means yet to boot.
 * Any other value means cold booted. Read by plat_reset_handler (assembly).
 */
uint32_t g_qti_bl31_cold_booted __attribute__ ((section("tzfw_coherent_mem"))) = 0x0;
/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Copies the parameters passed by the calling EL before they are potentially
 * lost, registers the UART console handed over by coreboot (if any), and
 * records the counter frequency already programmed for the boot core. Runs
 * before the MMU is initialized, so only flushed BL2 data is relied upon.
 ******************************************************************************/
void bl31_early_platform_setup(u_register_t from_bl2,
			       u_register_t plat_params_from_bl2)
{
	/* Counter frequency was programmed by coreboot before TF-A entry. */
	g_qti_cpu_cntfrq = read_cntfrq_el0();

	bl_aux_params_parse(plat_params_from_bl2, NULL);

#if COREBOOT
	{
		uintptr_t uart_base = coreboot_serial.baseaddr;

		if (uart_base != 0U) {
			static console_t g_qti_console_uart;

			qti_console_uart_register(&g_qti_console_uart,
						  uart_base);
		}
	}
#endif

	/*
	 * Tell BL31 where the non-trusted software image is located and its
	 * entry state information.
	 */
	bl31_params_parse_helper(from_bl2, NULL, &bl33_image_ep_info);
}
/*
 * Standard BL31 entry hook: only the first two arguments carry
 * information on this platform; forward them to the real setup.
 */
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	(void)arg2;
	(void)arg3;

	bl31_early_platform_setup(arg0, arg1);
}
/*******************************************************************************
 * Perform the very early platform specific architectural setup: build the
 * translation tables covering this BL image and turn the MMU on at EL3.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	const uintptr_t image_base = BL_CODE_BASE;
	const size_t image_size = BL_COHERENT_RAM_END - BL_CODE_BASE;

	qti_setup_page_tables(image_base, image_size,
			      BL_CODE_BASE, BL_CODE_END,
			      BL_RO_DATA_BASE, BL_RO_DATA_END,
			      BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END);

	enable_mmu_el3(0);
}
/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms.
 * The GIC driver and distributor/redistributor interfaces are brought up
 * before qti_interrupt_svc_init() registers the EL3 interrupt handler;
 * finally the cold-boot flag consumed by plat_reset_handler is set.
 ******************************************************************************/
void bl31_platform_setup(void)
{
	generic_delay_timer_init();

	/* Initialize the GIC driver, CPU and distributor interfaces */
	plat_qti_gic_driver_init();
	plat_qti_gic_init();

	qti_interrupt_svc_init();

	qtiseclib_bl31_platform_setup();

	/* set boot state to cold boot complete. */
	g_qti_bl31_cold_booted = 0x1;
}
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image
 * type; BL32 corresponds to the secure image type, which this platform does
 * not implement. A NULL pointer is returned if the image does not exist
 * (i.e. its entry point is 0x0, which no image on this platform may use).
 ******************************************************************************/
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	/* QTI platform don't have BL32 implementation. */
	assert(type == NON_SECURE);
	assert(bl33_image_ep_info.h.type == PARAM_EP);
	assert(bl33_image_ep_info.h.attr == NON_SECURE);

	/* A zero entrypoint means the image is absent. */
	return (bl33_image_ep_info.pc != 0U) ? &bl33_image_ep_info : NULL;
}
/*******************************************************************************
 * This function is used by the architecture setup code to retrieve the counter
 * frequency for the CPU's generic timer. This value will be programmed into
 * the CNTFRQ_EL0 register. On this platform it returns the frequency that
 * coreboot programmed for the boot core, captured in
 * bl31_early_platform_setup().
 *
 * The platform API returns 'unsigned int' while the saved value is 64-bit;
 * narrow it explicitly. Real CNTFRQ values fit in 32 bits (CNTFRQ_EL0 is a
 * 32-bit field), so no information is lost.
 ******************************************************************************/
unsigned int plat_get_syscnt_freq2(void)
{
	/* Must have been captured during early setup. */
	assert(g_qti_cpu_cntfrq != 0);

	return (unsigned int)g_qti_cpu_cntfrq;
}
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <common/debug.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform_def.h>
#include <qti_plat.h>
#include <qtiseclib_interface.h>
/*
 * Table of regions for various BL stages to map using the MMU.
 * This doesn't include TZRAM as the 'mem_layout' argument passed to
 * qti_configure_mmu_elx() will give the available subset of that,
 */
const mmap_region_t plat_qti_mmap[] = {
	/* Device register space: secure read/write device memory. */
	MAP_REGION_FLAT(QTI_DEVICE_BASE, QTI_DEVICE_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	/* AOP command DB: non-secure, read-only, never executable. */
	MAP_REGION_FLAT(QTI_AOP_CMD_DB_BASE, QTI_AOP_CMD_DB_SIZE,
			MT_NS | MT_RO | MT_EXECUTE_NEVER),
	{0}	/* Sentinel terminating the list. */
};

/* Fail the build if the table outgrows the xlat library's configured limit. */
CASSERT(ARRAY_SIZE(plat_qti_mmap) <= MAX_MMAP_REGIONS, assert_max_mmap_regions);
/*
 * Return true when [addr, addr + size) overlaps the BL31 image range,
 * or when addr + size wraps around (treated as an overlap to be safe).
 */
bool qti_is_overlap_atf_rg(unsigned long long addr, size_t size)
{
	unsigned long long end = addr + size;

	/* Arithmetic wrap-around: reject outright. */
	if (end < addr) {
		return true;
	}

	return (BL31_BASE < end) && (BL31_LIMIT > addr);
}
/*
* unsigned int plat_qti_my_cluster_pos(void)
* definition to get the cluster index of the calling CPU.
* - In ARM v8 (MPIDR_EL1[24]=0)
* ClusterId = MPIDR_EL1[15:8]
* - In ARM v8.1 & Later version (MPIDR_EL1[24]=1)
* ClusterId = MPIDR_EL1[23:15]
*/
unsigned int plat_qti_my_cluster_pos(void)
{
unsigned int mpidr, cluster_id;
mpidr = read_mpidr_el1();
if ((mpidr & MPIDR_MT_MASK) == 0) { /* MT not supported */
cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
} else { /* MT supported */
cluster_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
}
assert(cluster_id < PLAT_CLUSTER_COUNT);
return cluster_id;
}
/*
 * Set up the page tables for the generic and platform-specific memory
 * regions. The extents of the generic regions are given by the arguments:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 */
void qti_setup_page_tables(uintptr_t total_base,
			   size_t total_size,
			   uintptr_t code_start,
			   uintptr_t code_limit,
			   uintptr_t rodata_start,
			   uintptr_t rodata_limit,
			   uintptr_t coh_start, uintptr_t coh_limit)
{
	size_t code_size = code_limit - code_start;
	size_t rodata_size = rodata_limit - rodata_start;
	size_t coh_size = coh_limit - coh_start;

	/*
	 * Cover the whole Trusted SRAM first as secure RW memory; the
	 * narrower regions added below refine the attributes where needed.
	 */
	VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
		(void *)total_base, (void *)(total_base + total_size));
	mmap_add_region(total_base, total_base, total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Executable code section. */
	VERBOSE("Code region: %p - %p\n",
		(void *)code_start, (void *)code_limit);
	mmap_add_region(code_start, code_start, code_size,
			MT_CODE | MT_SECURE);

	/* Read-only data section. */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *)rodata_start, (void *)rodata_limit);
	mmap_add_region(rodata_start, rodata_start, rodata_size,
			MT_RO_DATA | MT_SECURE);

	/* Coherent memory is mapped with device attributes. */
	VERBOSE("Coherent region: %p - %p\n",
		(void *)coh_start, (void *)coh_limit);
	mmap_add_region(coh_start, coh_start, coh_size,
			MT_DEVICE | MT_RW | MT_SECURE);

	/* Platform-specific regions, then build the tables. */
	mmap_add(plat_qti_mmap);
	init_xlat_tables();
}
/*
 * Expand [addr, addr + size) to the smallest page-aligned region that
 * contains it. Outputs: *aligned_addr (page-rounded base) and
 * *aligned_size (page-rounded length including the leading padding).
 */
static inline void qti_align_mem_region(uintptr_t addr, size_t size,
					uintptr_t *aligned_addr,
					size_t *aligned_size)
{
	uintptr_t page_base = round_down(addr, PAGE_SIZE);
	size_t lead = addr - page_base;

	*aligned_addr = page_base;
	*aligned_size = round_up(lead + size, PAGE_SIZE);
}
/*
 * Identity-map a page-aligned dynamic region covering [base_pa, base_pa +
 * size). Returns 0 on success, -EPERM if the region would overlap TF-A,
 * or the error from mmap_add_dynamic_region().
 *
 * Fix: the TF-A overlap check previously ran on the caller's unaligned
 * range, while the (larger) page-aligned range is what actually gets
 * mapped -- alignment padding could therefore overlap BL31 and still be
 * mapped. Check the aligned range instead.
 */
int qti_mmap_add_dynamic_region(uintptr_t base_pa, size_t size,
				unsigned int attr)
{
	uintptr_t aligned_pa;
	size_t aligned_size;

	qti_align_mem_region(base_pa, size, &aligned_pa, &aligned_size);

	if (qti_is_overlap_atf_rg(aligned_pa, aligned_size)) {
		/* Memory shouldn't overlap with TF-A range. */
		return -EPERM;
	}

	return mmap_add_dynamic_region(aligned_pa, aligned_pa, aligned_size,
				       attr);
}
/*
 * Remove a dynamic mapping previously added via
 * qti_mmap_add_dynamic_region(), applying the same page alignment so the
 * range handed to the xlat library matches what was mapped.
 */
int qti_mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	uintptr_t va = base_va;
	size_t len = size;

	qti_align_mem_region(va, len, &va, &len);
	return mmap_remove_dynamic_region(va, len);
}
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/bl_common.h>
#include <drivers/arm/gicv3.h>
#include <platform.h>
#include <platform_def.h>
#include <qti_plat.h>
#include <qtiseclib_defs.h>
#include <qtiseclib_defs_plat.h>
/* The GICv3 driver only needs to be initialized in EL3 */
static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];

/*
 * Array of interrupts to be configured by the gic driver.
 * Every entry is Group 0 (secure, handled in EL3) at the highest secure
 * priority; only the trigger type (edge/level) differs.
 */
static const interrupt_prop_t qti_interrupt_props[] = {
	INTR_PROP_DESC(QTISECLIB_INT_ID_CPU_WAKEUP_SGI,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_RESET_SGI, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_SEC_WDOG_BARK, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_NON_SEC_WDOG_BITE,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_LEVEL),
	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CLT_SEC,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CLT_NONSEC,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CFG_SEC,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_VMIDMT_ERR_CFG_NONSEC,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_XPU_SEC, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_XPU_NON_SEC, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	/* Not every SoC exposes the A1 NOC error interrupt. */
#ifdef QTISECLIB_INT_ID_A1_NOC_ERROR
	INTR_PROP_DESC(QTISECLIB_INT_ID_A1_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
#endif
	INTR_PROP_DESC(QTISECLIB_INT_ID_A2_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_CONFIG_NOC_ERROR,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_DC_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_MEM_NOC_ERROR, GIC_HIGHEST_SEC_PRIORITY,
		       INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_SYSTEM_NOC_ERROR,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
	INTR_PROP_DESC(QTISECLIB_INT_ID_MMSS_NOC_ERROR,
		       GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
		       GIC_INTR_CFG_EDGE),
};

/* Static configuration handed to gicv3_driver_init(). */
const gicv3_driver_data_t qti_gic_data = {
	.gicd_base = QTI_GICD_BASE,
	.gicr_base = QTI_GICR_BASE,
	.interrupt_props = qti_interrupt_props,
	.interrupt_props_num = ARRAY_SIZE(qti_interrupt_props),
	.rdistif_num = PLATFORM_CORE_COUNT,
	.rdistif_base_addrs = rdistif_base_addrs,
	.mpidr_to_core_pos = plat_qti_core_pos_by_mpidr
};
void plat_qti_gic_driver_init(void)
{
	/*
	 * Initialize the GICv3 driver with this platform's distributor/
	 * redistributor bases and interrupt properties (qti_gic_data).
	 * Called once from bl31_platform_setup(), i.e. in EL3 only.
	 */
	gicv3_driver_init(&qti_gic_data);
}
/******************************************************************************
 * Common helper to initialize the GIC: brings up the distributor, this
 * core's redistributor and CPU interface, then routes every configured
 * secure SPI to "any PE". Only invoked by BL31.
 *****************************************************************************/
void plat_qti_gic_init(void)
{
	unsigned int idx;

	gicv3_distif_init();
	gicv3_rdistif_init(plat_my_core_pos());
	gicv3_cpuif_enable(plat_my_core_pos());

	/* Route secure spi interrupt to ANY. */
	for (idx = 0U; idx < ARRAY_SIZE(qti_interrupt_props); idx++) {
		unsigned int intr_num = qti_interrupt_props[idx].intr_num;

		if (plat_ic_is_spi(intr_num)) {
			gicv3_set_spi_routing(intr_num, GICV3_IRM_ANY, 0x0);
		}
	}
}
/* Thin wrapper exposing SPI routing control to the rest of the platform. */
void gic_set_spi_routing(unsigned int id, unsigned int irm, u_register_t target)
{
	gicv3_set_spi_routing(id, irm, target);
}

/******************************************************************************
 * ARM common helper to enable the GIC CPU interface (for the calling core)
 *****************************************************************************/
void plat_qti_gic_cpuif_enable(void)
{
	gicv3_cpuif_enable(plat_my_core_pos());
}

/******************************************************************************
 * ARM common helper to disable the GIC CPU interface (for the calling core)
 *****************************************************************************/
void plat_qti_gic_cpuif_disable(void)
{
	gicv3_cpuif_disable(plat_my_core_pos());
}

/******************************************************************************
 * ARM common helper to initialize the per-CPU redistributor interface in GICv3
 *****************************************************************************/
void plat_qti_gic_pcpu_init(void)
{
	gicv3_rdistif_init(plat_my_core_pos());
}

/******************************************************************************
 * ARM common helpers to power the calling core's GIC redistributor interface
 *****************************************************************************/
void plat_qti_gic_redistif_on(void)
{
	gicv3_rdistif_on(plat_my_core_pos());
}

void plat_qti_gic_redistif_off(void)
{
	gicv3_rdistif_off(plat_my_core_pos());
}
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018,2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <stdint.h>
#include <arch_helpers.h>
#include <bl31/interrupt_mgmt.h>
#include <drivers/arm/gic_common.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <platform.h>
#include <qti_interrupt_svc.h>
#include <qtiseclib_interface.h>
#define QTI_INTR_INVALID_INT_NUM 0xFFFFFFFFU
/*
 * Top-level EL3 interrupt handler.
 *
 * Registered via register_interrupt_type_handler() for INTR_TYPE_EL3.
 * Acknowledges the interrupt, hands it to the qtiseclib ISR dispatcher,
 * and signals End-Of-Interrupt. 'id' and 'cookie' are unused here; the
 * real interrupt ID is obtained from the GIC acknowledge register.
 */
static uint64_t qti_el3_interrupt_handler(uint32_t id, uint32_t flags,
					  void *handle, void *cookie)
{
	uint32_t irq = QTI_INTR_INVALID_INT_NUM;
	/*
	 * EL3 non-interruptible. Interrupt shouldn't occur when we are at
	 * EL3 / Secure.
	 */
	assert(handle != cm_get_context(SECURE));
	irq = plat_ic_acknowledge_interrupt();
	qtiseclib_invoke_isr(irq, handle);
	/*
	 * End of Interrupt. IDs >= 1022 are GICv3 special INTIDs
	 * (presumably the reserved/spurious range) and must not be EOI'd —
	 * NOTE(review): threshold 1022 skips 1022/1023 but would still EOI
	 * special IDs 1020/1021; confirm against the GICv3 spec.
	 */
	if (irq < 1022U) {
		plat_ic_end_of_interrupt(irq);
	}
	return (uint64_t) handle;
}
/*
 * Register the platform's EL3 interrupt handler with the interrupt
 * management framework. Returns 0 on success, the framework error
 * code otherwise.
 */
int qti_interrupt_svc_init(void)
{
	uint64_t intr_flags = 0U;
	int result;

	/*
	 * Route EL3 interrupts to EL3 while executing in Non-secure.
	 * Note: interrupts stay disabled at EL3 and S-EL1 is not
	 * supported on this platform.
	 */
	set_interrupt_rm_flag(intr_flags, NON_SECURE);

	/* Install the handler for all EL3-type interrupts. */
	result = register_interrupt_type_handler(INTR_TYPE_EL3,
						 qti_el3_interrupt_handler,
						 intr_flags);
	assert(result == 0);

	return result;
}
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <lib/psci/psci.h>
#include <platform.h>
#include <platform_def.h>
#include <qti_cpu.h>
#include <qti_plat.h>
#include <qtiseclib_cb_interface.h>
#include <qtiseclib_defs_plat.h>
#include <qtiseclib_interface.h>
/* Each local power state occupies one 4-bit nibble of the StateID. */
#define QTI_LOCAL_PSTATE_WIDTH 4
#define QTI_LOCAL_PSTATE_MASK ((1 << QTI_LOCAL_PSTATE_WIDTH) - 1)
/* Make composite power state parameter till level 0 */
#define qti_make_pwrstate_lvl0(lvl0_state, type) \
	(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
/* Make composite power state parameter till level 1 */
#define qti_make_pwrstate_lvl1(lvl1_state, lvl0_state, type) \
	(((lvl1_state) << QTI_LOCAL_PSTATE_WIDTH) | \
	qti_make_pwrstate_lvl0(lvl0_state, type))
/* Make composite power state parameter till level 2 */
#define qti_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, type) \
	(((lvl2_state) << (QTI_LOCAL_PSTATE_WIDTH * 2)) | \
	qti_make_pwrstate_lvl1(lvl1_state, lvl0_state, type))
/* Make composite power state parameter till level 3 */
#define qti_make_pwrstate_lvl3(lvl3_state, lvl2_state, lvl1_state, lvl0_state, type) \
	(((lvl3_state) << (QTI_LOCAL_PSTATE_WIDTH * 3)) | \
	qti_make_pwrstate_lvl2(lvl2_state, lvl1_state, lvl0_state, type))
/* QTI_CORE_PWRDN_EN_MASK happens to be same across all CPUs */
#define QTI_CORE_PWRDN_EN_MASK 1
/*
 * cpu power control happens to be same across all CPUs.
 * S3_0_C15_C2_7 is an implementation-defined system register
 * (CPU power control) — accessors generated below.
 */
_DEFINE_SYSREG_WRITE_FUNC(cpu_pwrctrl_val, S3_0_C15_C2_7)
_DEFINE_SYSREG_READ_FUNC(cpu_pwrctrl_val, S3_0_C15_C2_7)
/*
 * Table of valid composite power states, checked by
 * qti_validate_power_state(). Zero-terminated; the deepest state is the
 * second-to-last entry (relied upon by qti_get_sys_suspend_power_state()).
 */
const unsigned int qti_pm_idle_states[] = {
	/* Core power-down only. */
	qti_make_pwrstate_lvl0(QTI_LOCAL_STATE_OFF,
			       PSTATE_TYPE_POWERDOWN),
	/* Core deep power-down. */
	qti_make_pwrstate_lvl0(QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	/* Core + level-1 domain deep power-down. */
	qti_make_pwrstate_lvl1(QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	/* Up to level-2 domain down. */
	qti_make_pwrstate_lvl2(QTI_LOCAL_STATE_OFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	/* Deepest: up to level-3 domain down. */
	qti_make_pwrstate_lvl3(QTI_LOCAL_STATE_OFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       QTI_LOCAL_STATE_DEEPOFF,
			       PSTATE_TYPE_POWERDOWN),
	0,			/* Sentinel terminator. */
};
/*******************************************************************************
 * QTI standard platform handler called to check the validity of the power
 * state parameter. The power state parameter has to be a composite power
 * state that appears in qti_pm_idle_states[]. On success the unpacked
 * per-level local states are written into req_state.
 ******************************************************************************/
int qti_validate_power_state(unsigned int power_state,
			     psci_power_state_t *req_state)
{
	unsigned int state_id;
	int idx;
	int lvl = 0;

	assert(req_state);

	/*
	 * Linear scan of the zero-terminated idle-state table; a binary
	 * search is not worth the complexity for this few entries.
	 */
	for (idx = 0; qti_pm_idle_states[idx] != 0U; idx++) {
		if (qti_pm_idle_states[idx] == power_state) {
			break;
		}
	}

	/* Hit the sentinel without a match: not a supported state. */
	if (qti_pm_idle_states[idx] == 0U) {
		return PSCI_E_INVALID_PARAMS;
	}

	/* Unpack the StateID, one local-state nibble per power level. */
	state_id = psci_get_pstate_id(power_state);
	while (state_id != 0U) {
		req_state->pwr_domain_state[lvl++] =
		    state_id & QTI_LOCAL_PSTATE_MASK;
		state_id >>= QTI_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}
/*******************************************************************************
 * PLATFORM FUNCTIONS
 ******************************************************************************/
/*
 * Set the core power-down enable bit in the implementation-defined CPU
 * power control register so the core actually powers down on the next wfi.
 */
static void qti_set_cpupwrctlr_val(void)
{
	unsigned long val;
	val = read_cpu_pwrctrl_val();
	val |= QTI_CORE_PWRDN_EN_MASK;
	write_cpu_pwrctrl_val(val);
	/* Ensure the register write is architecturally complete. */
	isb();
}
/**
 * PSCI CPU_ON hook: validate the requested MPIDR and then delegate the
 * actual power-up to qtiseclib.
 *
 * Returns PSCI_E_INVALID_PARAMS for an MPIDR that does not map to a
 * valid core index, otherwise the qtiseclib result.
 */
static int qti_cpu_power_on(u_register_t mpidr)
{
	int idx = plat_core_pos_by_mpidr(mpidr);

	/* Reject MPIDRs outside the platform's core range. */
	if ((idx < 0) || (idx >= QTISECLIB_PLAT_CORE_COUNT)) {
		return PSCI_E_INVALID_PARAMS;
	}

	return qtiseclib_psci_node_power_on(mpidr);
}
static bool is_cpu_off(const psci_power_state_t *target_state)
{
if ((target_state->pwr_domain_state[QTI_PWR_LVL0] ==
QTI_LOCAL_STATE_OFF) ||
(target_state->pwr_domain_state[QTI_PWR_LVL0] ==
QTI_LOCAL_STATE_DEEPOFF)) {
return true;
} else {
return false;
}
}
/*
 * PSCI CPU_ON finish hook: let qtiseclib complete the power-up, then
 * re-enable the GIC CPU interface if the core had been fully off.
 */
static void qti_cpu_power_on_finish(const psci_power_state_t *target_state)
{
	qtiseclib_psci_node_on_finish(
	    (const uint8_t *)target_state->pwr_domain_state);

	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_enable();
	}
}
/*
 * PSCI cpu_standby hook. Intentionally a no-op on this platform; the
 * generic PSCI layer performs the actual standby entry.
 */
static void qti_cpu_standby(plat_local_state_t cpu_state)
{
}
/*
 * PSCI CPU_OFF hook: notify qtiseclib, then — if the core itself is going
 * down — quiesce its GIC CPU interface and arm the core power-down bit.
 */
static void qti_node_power_off(const psci_power_state_t *target_state)
{
	const uint8_t *states =
	    (const uint8_t *)target_state->pwr_domain_state;

	qtiseclib_psci_node_power_off(states);

	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_disable();
		qti_set_cpupwrctlr_val();
	}
}
/*
 * PSCI suspend hook: notify qtiseclib, then — if the core itself is going
 * down — quiesce its GIC CPU interface and arm the core power-down bit.
 */
static void qti_node_suspend(const psci_power_state_t *target_state)
{
	const uint8_t *states =
	    (const uint8_t *)target_state->pwr_domain_state;

	qtiseclib_psci_node_suspend(states);

	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_disable();
		qti_set_cpupwrctlr_val();
	}
}
/*
 * PSCI suspend-finish hook: let qtiseclib complete resume, then re-enable
 * the GIC CPU interface if the core had been fully off.
 */
static void qti_node_suspend_finish(const psci_power_state_t *target_state)
{
	qtiseclib_psci_node_suspend_finish(
	    (const uint8_t *)target_state->pwr_domain_state);

	if (is_cpu_off(target_state)) {
		plat_qti_gic_cpuif_enable();
	}
}
/*
 * Final power-down step invoked by the PSCI layer after the domain state
 * has been committed. Never returns.
 */
__dead2 void qti_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	/* For now just do WFI - add any target specific handling if needed */
	psci_power_down_wfi();
	/* We should never reach here */
}
/* PSCI SYSTEM_OFF hook — delegated entirely to qtiseclib; never returns. */
__dead2 void qti_system_off(void)
{
	qtiseclib_psci_system_off();
}
/* PSCI SYSTEM_RESET hook — delegated entirely to qtiseclib; never returns. */
__dead2 void qti_system_reset(void)
{
	qtiseclib_psci_system_reset();
}
/*
 * PSCI SYSTEM_SUSPEND hook: populate req_state with the deepest composite
 * power state the platform supports.
 */
void qti_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	int i = 0;
	unsigned int state_id, power_state;
	int size = ARRAY_SIZE(qti_pm_idle_states);
	/*
	 * Find deepest state.
	 * The qti_pm_idle_states[] array has last element by default 0,
	 * so the real deepest state is second last element of that array.
	 */
	power_state = qti_pm_idle_states[size - 2];
	state_id = psci_get_pstate_id(power_state);
	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] =
		    state_id & QTI_LOCAL_PSTATE_MASK;
		state_id >>= QTI_LOCAL_PSTATE_WIDTH;
	}
}
/*
 * Structure containing platform specific PSCI operations. Common
 * PSCI layer will use this.
 */
const plat_psci_ops_t plat_qti_psci_pm_ops = {
	.pwr_domain_on = qti_cpu_power_on,
	.pwr_domain_on_finish = qti_cpu_power_on_finish,
	.cpu_standby = qti_cpu_standby,
	.pwr_domain_off = qti_node_power_off,
	.pwr_domain_suspend = qti_node_suspend,
	.pwr_domain_suspend_finish = qti_node_suspend_finish,
	.pwr_domain_pwr_down_wfi = qti_domain_power_down_wfi,
	.system_off = qti_system_off,
	.system_reset = qti_system_reset,
	/* Not implemented on this platform. */
	.get_node_hw_state = NULL,
	.translate_power_state_by_mpidr = NULL,
	.get_sys_suspend_power_state = qti_get_sys_suspend_power_state,
	.validate_power_state = qti_validate_power_state,
};
/**
 * The QTI Standard platform definition of platform porting API
 * `plat_setup_psci_ops`. Initialises qtiseclib's PSCI state with the
 * BL31 warm-boot entry point and, on success, publishes the platform
 * PSCI operation table.
 */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	int ret = qtiseclib_psci_init((uintptr_t)bl31_warm_entrypoint);

	if (ret == PSCI_E_SUCCESS) {
		*psci_ops = &plat_qti_psci_pm_ops;
	}

	return ret;
}
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <stdint.h>
#include <platform.h>
#include <platform_def.h>
#include <qtiseclib_interface.h>
/*
 * Supply the stack-protector canary value. The random bytes come from
 * qtiseclib's PRNG; per its contract the buffer is never left as 0 on
 * success, which the debug-build assert double-checks.
 */
u_register_t plat_get_stack_protector_canary(void)
{
	u_register_t canary = 0x0;

	qtiseclib_prng_get_data((uint8_t *)&canary, sizeof(canary));
	assert(canary != 0x0);

	return canary;
}
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/coreboot.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <smccc_helpers.h>
#include <tools_share/uuid.h>
#include <qti_plat.h>
#include <qti_secure_io_cfg.h>
#include <qtiseclib_interface.h>
/*
 * SIP service - SMC function IDs for SiP Service queries
 *
 */
#define QTI_SIP_SVC_CALL_COUNT_ID U(0x0200ff00)
#define QTI_SIP_SVC_UID_ID U(0x0200ff01)
/* 0x8200ff02 is reserved */
#define QTI_SIP_SVC_VERSION_ID U(0x0200ff03)
/*
 * Syscalls to allow the Non Secure world to access peripheral/IO memory
 * that is secure/protected BUT not required to be secure.
 */
#define QTI_SIP_SVC_SECURE_IO_READ_ID U(0x02000501)
#define QTI_SIP_SVC_SECURE_IO_WRITE_ID U(0x02000502)
/*
 * Syscall that assigns a list of intermediate PAs from a
 * source Virtual Machine (VM) to a destination VM.
 */
#define QTI_SIP_SVC_MEM_ASSIGN_ID U(0x02000C16)
/* Expected values of x1 (argument-descriptor ID) for each call. */
#define QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID U(0x1)
#define QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID U(0x2)
#define QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID U(0x1117)
/* Values reported by the CALL_COUNT / VERSION queries. */
#define QTI_SIP_SVC_CALL_COUNT U(0x3)
#define QTI_SIP_SVC_VERSION_MAJOR U(0x0)
#define QTI_SIP_SVC_VERSION_MINOR U(0x0)
/* Exclusive upper bound for valid VM identifiers. */
#define QTI_VM_LAST U(44)
#define SIZE4K U(0x1000)
/* Maximum number of address-range mappings accepted per mem-assign call. */
#define QTI_VM_MAX_LIST_SIZE U(0x20)
/* Keep only the OEN + function-number bits so SMC32/SMC64 IDs match. */
#define FUNCID_OEN_NUM_MASK ((FUNCID_OEN_MASK << FUNCID_OEN_SHIFT)\
				|(FUNCID_NUM_MASK << FUNCID_NUM_SHIFT))
/* SiP service return codes (negative values follow SMCCC convention). */
enum {
	QTI_SIP_SUCCESS = 0,
	QTI_SIP_NOT_SUPPORTED = -1,
	QTI_SIP_PREEMPTED = -2,
	QTI_SIP_INVALID_PARAM = -3,
};
/* QTI SiP Service UUID */
DEFINE_SVC_UUID2(qti_sip_svc_uid,
		 0x43864748, 0x217f, 0x41ad, 0xaa, 0x5a,
		 0xba, 0xe7, 0x0f, 0xa5, 0x52, 0xaf);
/*
 * Check whether a register address is on the allow-list of secure IO
 * registers that the Non-secure world may access through the
 * SECURE_IO_READ/WRITE SiP services.
 *
 * Fix: the loop index was a signed 'int' compared against the unsigned
 * size_t result of ARRAY_SIZE() (-Wsign-compare hazard); use size_t and
 * drop the redundant pre-loop initialisation.
 */
static bool qti_is_secure_io_access_allowed(u_register_t addr)
{
	size_t i;

	for (i = 0U; i < ARRAY_SIZE(qti_secure_io_allowed_regs); i++) {
		if ((uintptr_t)addr == qti_secure_io_allowed_regs[i]) {
			return true;
		}
	}

	return false;
}
/*
 * Validate all parameters of a mem-assign request before they are passed
 * to qtiseclib: non-NULL pointers, list counts within (0, QTI_VM_LAST)
 * (mappings capped at QTI_VM_MAX_LIST_SIZE), 4KiB-aligned regions that do
 * not wrap, regions that coreboot reports as RAM, and VM IDs below
 * QTI_VM_LAST. Returns true only if every check passes.
 */
bool qti_mem_assign_validate_param(memprot_info_t *mem_info,
				   u_register_t u_num_mappings,
				   uint32_t *source_vm_list,
				   u_register_t src_vm_list_cnt,
				   memprot_dst_vm_perm_info_t *dest_vm_list,
				   u_register_t dst_vm_list_cnt)
{
	int i;
	/* Reject NULL lists and counts outside the supported ranges. */
	if (!source_vm_list || !dest_vm_list || !mem_info
	    || (src_vm_list_cnt == 0)
	    || (src_vm_list_cnt >= QTI_VM_LAST) || (dst_vm_list_cnt == 0)
	    || (dst_vm_list_cnt >= QTI_VM_LAST) || (u_num_mappings == 0)
	    || u_num_mappings > QTI_VM_MAX_LIST_SIZE) {
		return false;
	}
	for (i = 0; i < u_num_mappings; i++) {
		/* Address and size must both be 4KiB aligned. */
		if ((mem_info[i].mem_addr & (SIZE4K - 1))
		    || (mem_info[i].mem_size & (SIZE4K - 1))) {
			return false;
		}
		/* Reject regions whose end address wraps around. */
		if ((mem_info[i].mem_addr + mem_info[i].mem_size) <
		    mem_info[i].mem_addr) {
			return false;
		}
		/* Start of the region must be coreboot-reported RAM. */
		if (coreboot_get_memory_type(mem_info[i].mem_addr) !=
		    CB_MEM_RAM) {
			return false;
		}
		/*
		 * NOTE(review): this probes mem_addr + mem_size, i.e. one
		 * byte past the region's last byte — confirm the exclusive
		 * end address (rather than mem_addr + mem_size - 1) is
		 * intended here.
		 */
		if (coreboot_get_memory_type
		    (mem_info[i].mem_addr + mem_info[i].mem_size) !=
		    CB_MEM_RAM) {
			return false;
		}
	}
	/* Every source and destination VM ID must be in range. */
	for (i = 0; i < src_vm_list_cnt; i++) {
		if (source_vm_list[i] >= QTI_VM_LAST) {
			return false;
		}
	}
	for (i = 0; i < dst_vm_list_cnt; i++) {
		if (dest_vm_list[i].dst_vm >= QTI_VM_LAST) {
			return false;
		}
	}
	return true;
}
/*
 * Handle the QTI_SIP_SVC_MEM_ASSIGN SMC: reassign a list of IPA ranges
 * from source VMs to destination VMs via qtiseclib.
 *
 * Calling convention (direct + indirect args):
 *   x1      - must be QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID
 *   x2 / x3 - pointer to / size of the memprot_info_t mapping list
 *   x4      - pointer to the source VM-ID list
 *   x5      - pointer to an NS buffer holding three further words:
 *             source-list size, destination-list pointer, destination-
 *             list size (word width depends on smc_cc)
 * All NS buffers are mapped read-only before being dereferenced, copied
 * into EL3 stack arrays so validation cannot be bypassed by concurrent
 * NS writes, and unmapped again. Returns via SMC_RET2 with QTI_SIP_SUCCESS
 * or QTI_SIP_INVALID_PARAM plus the library result.
 */
static uintptr_t qti_sip_mem_assign(void *handle, uint32_t smc_cc,
				    u_register_t x1,
				    u_register_t x2,
				    u_register_t x3, u_register_t x4)
{
	uintptr_t dyn_map_start = 0, dyn_map_end = 0;
	size_t dyn_map_size = 0;
	u_register_t x6, x7;
	int ret = QTI_SIP_NOT_SUPPORTED;
	/* x5 is not passed in registers by the framework; read it from the
	 * saved NS GP-register context. */
	u_register_t x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5);
	if (smc_cc == SMC_32) {
		x5 = (uint32_t) x5;
	}
	/* Validate input arg count & retrieve arg3-6 from NS Buffer.
	 * (dyn_map_start is still 0 here, so the cleanup path skips the
	 * unmap.) */
	if ((x1 != QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID) || (x5 == 0x0)) {
		goto unmap_return;
	}
	/* Map NS Buffer: three words starting at x5. */
	dyn_map_start = x5;
	dyn_map_size =
	    (smc_cc ==
	     SMC_32) ? (sizeof(uint32_t) * 4) : (sizeof(uint64_t) * 4);
	/*
	 * NOTE(review): if this map fails, the unmap_return path still
	 * attempts to remove the never-mapped region — confirm
	 * qti_mmap_remove_dynamic_region tolerates that.
	 */
	if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
					(MT_NS | MT_RO_DATA)) != 0) {
		goto unmap_return;
	}
	/* Retrieve indirect args: x5 = src-list size, x6 = dest-list ptr,
	 * x7 = dest-list size. */
	if (smc_cc == SMC_32) {
		x6 = *((uint32_t *) x5 + 1);
		x7 = *((uint32_t *) x5 + 2);
		x5 = *(uint32_t *) x5;
	} else {
		x6 = *((uint64_t *) x5 + 1);
		x7 = *((uint64_t *) x5 + 2);
		x5 = *(uint64_t *) x5;
	}
	/* Un-Map NS Buffer. */
	if (qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size) != 0) {
		goto unmap_return;
	}
	/*
	 * Map NS Buffers.
	 * arg0,2,4 points to buffers & arg1,3,5 hold sizes.
	 * MAP api's fail to map if it's already mapped. Let's
	 * find lowest start & highest end address, then map once.
	 */
	dyn_map_start = MIN(x2, x4);
	dyn_map_start = MIN(dyn_map_start, x6);
	dyn_map_end = MAX((x2 + x3), (x4 + x5));
	dyn_map_end = MAX(dyn_map_end, (x6 + x7));
	dyn_map_size = dyn_map_end - dyn_map_start;
	if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
					(MT_NS | MT_RO_DATA)) != 0) {
		goto unmap_return;
	}
	memprot_info_t *mem_info_p = (memprot_info_t *) x2;
	uint32_t u_num_mappings = x3 / sizeof(memprot_info_t);
	uint32_t *source_vm_list_p = (uint32_t *) x4;
	uint32_t src_vm_list_cnt = x5 / sizeof(uint32_t);
	memprot_dst_vm_perm_info_t *dest_vm_list_p =
	    (memprot_dst_vm_perm_info_t *) x6;
	uint32_t dst_vm_list_cnt =
	    x7 / sizeof(memprot_dst_vm_perm_info_t);
	if (qti_mem_assign_validate_param(mem_info_p, u_num_mappings,
					  source_vm_list_p, src_vm_list_cnt,
					  dest_vm_list_p,
					  dst_vm_list_cnt) != true) {
		goto unmap_return;
	}
	/* Copy NS data into EL3 stack buffers before use, so NS cannot
	 * change it after validation (TOCTOU). Sizes were bounded above. */
	memprot_info_t mem_info[QTI_VM_MAX_LIST_SIZE];
	/* Populating the arguments */
	for (int i = 0; i < u_num_mappings; i++) {
		mem_info[i].mem_addr = mem_info_p[i].mem_addr;
		mem_info[i].mem_size = mem_info_p[i].mem_size;
	}
	memprot_dst_vm_perm_info_t dest_vm_list[QTI_VM_LAST];
	for (int i = 0; i < dst_vm_list_cnt; i++) {
		dest_vm_list[i].dst_vm = dest_vm_list_p[i].dst_vm;
		dest_vm_list[i].dst_vm_perm =
		    dest_vm_list_p[i].dst_vm_perm;
		dest_vm_list[i].ctx = dest_vm_list_p[i].ctx;
		dest_vm_list[i].ctx_size = dest_vm_list_p[i].ctx_size;
	}
	uint32_t source_vm_list[QTI_VM_LAST];
	for (int i = 0; i < src_vm_list_cnt; i++) {
		source_vm_list[i] = source_vm_list_p[i];
	}
	/* Un-Map NS Buffers. */
	if (qti_mmap_remove_dynamic_region(dyn_map_start,
					   dyn_map_size) != 0) {
		goto unmap_return;
	}
	/* Invoke API lib api. */
	ret = qtiseclib_mem_assign(mem_info, u_num_mappings,
				   source_vm_list, src_vm_list_cnt,
				   dest_vm_list, dst_vm_list_cnt);
	if (ret == 0) {
		SMC_RET2(handle, QTI_SIP_SUCCESS, ret);
	}
unmap_return:
	/* Un-Map NS Buffers if mapped.
	 * NOTE(review): on some paths the region was already unmapped (or a
	 * map failed), so this remove may be redundant; it is best-effort
	 * and its result is deliberately ignored. */
	if (dyn_map_start && dyn_map_size) {
		qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size);
	}
	SMC_RET2(handle, QTI_SIP_INVALID_PARAM, ret);
}
/*
 * This function handles QTI specific syscalls. Currently only SiP calls are
 * present. Both FAST & YIELD type calls land here.
 *
 * Each SMC_RET* macro returns directly to the caller; the trailing breaks
 * are therefore unreachable but kept for switch hygiene.
 */
static uintptr_t qti_sip_handler(uint32_t smc_fid,
				 u_register_t x1,
				 u_register_t x2,
				 u_register_t x3,
				 u_register_t x4,
				 void *cookie, void *handle, u_register_t flags)
{
	/* Mask to OEN + function number so SMC32 and SMC64 variants of the
	 * same call share a case label. */
	uint32_t l_smc_fid = smc_fid & FUNCID_OEN_NUM_MASK;
	/* For SMC32, only the lower 32 bits of each argument are valid. */
	if (GET_SMC_CC(smc_fid) == SMC_32) {
		x1 = (uint32_t) x1;
		x2 = (uint32_t) x2;
		x3 = (uint32_t) x3;
		x4 = (uint32_t) x4;
	}
	switch (l_smc_fid) {
	case QTI_SIP_SVC_CALL_COUNT_ID:
		{
			SMC_RET1(handle, QTI_SIP_SVC_CALL_COUNT);
			break;
		}
	case QTI_SIP_SVC_UID_ID:
		{
			/* Return UID to the caller */
			SMC_UUID_RET(handle, qti_sip_svc_uid);
			break;
		}
	case QTI_SIP_SVC_VERSION_ID:
		{
			/* Return the version of current implementation */
			SMC_RET2(handle, QTI_SIP_SVC_VERSION_MAJOR,
				 QTI_SIP_SVC_VERSION_MINOR);
			break;
		}
	case QTI_SIP_SVC_SECURE_IO_READ_ID:
		{
			/* Read one 32-bit allow-listed register at x2. */
			if ((x1 == QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID) &&
			    qti_is_secure_io_access_allowed(x2)) {
				SMC_RET2(handle, QTI_SIP_SUCCESS,
					 *((volatile uint32_t *)x2));
			}
			SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
			break;
		}
	case QTI_SIP_SVC_SECURE_IO_WRITE_ID:
		{
			/* Write x3 to one allow-listed register at x2. */
			if ((x1 == QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID) &&
			    qti_is_secure_io_access_allowed(x2)) {
				*((volatile uint32_t *)x2) = x3;
				SMC_RET1(handle, QTI_SIP_SUCCESS);
			}
			SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
			break;
		}
	case QTI_SIP_SVC_MEM_ASSIGN_ID:
		{
			return qti_sip_mem_assign(handle, GET_SMC_CC(smc_fid),
						  x1, x2, x3, x4);
			break;
		}
	default:
		{
			SMC_RET1(handle, QTI_SIP_NOT_SUPPORTED);
		}
	}
	return (uintptr_t) handle;
}
/* Define a runtime service descriptor for both fast & yield SiP calls.
 * No init hook is needed (NULL); qti_sip_handler serves both types. */
DECLARE_RT_SVC(qti_sip_fast_svc, OEN_SIP_START,
	       OEN_SIP_END, SMC_TYPE_FAST, NULL, qti_sip_handler);
DECLARE_RT_SVC(qti_sip_yield_svc, OEN_SIP_START,
	       OEN_SIP_END, SMC_TYPE_YIELD, NULL, qti_sip_handler);
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment