Commit 43d97fae authored by Madhukar Pappireddy, committed by TrustedFirmware Code Review

Merge changes from topic "qemu-sbsa-topology-psci" into integration

* changes:
  qemu/qemu_sbsa: add support for sbsa-ref Embedded Controller
  qemu/qemu_sbsa: topology is different from qemu so add handling
  qemu/common : change DEVICE2 definition for MMU
  qemu/aarch64/plat_helpers.S : calculate the position shift
parents f03c4ea8 2fb5ed47
@@ -32,7 +32,8 @@ endfunc plat_my_core_pos
 func plat_qemu_calc_core_pos
 	and	x1, x0, #MPIDR_CPU_MASK
 	and	x0, x0, #MPIDR_CLUSTER_MASK
-	add	x0, x1, x0, LSR #6
+	add	x0, x1, x0, LSR #(MPIDR_AFFINITY_BITS -\
+			  PLATFORM_CPU_PER_CLUSTER_SHIFT)
 	ret
 endfunc plat_qemu_calc_core_pos
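For reference, a minimal C sketch of the same core-position calculation, assuming the standard TF-A MPIDR layout (Aff0 in bits [7:0], Aff1 in bits [15:8], MPIDR_AFFINITY_BITS == 8) and the sbsa-ref value PLATFORM_CPU_PER_CLUSTER_SHIFT == 3; the helper name is illustrative and not part of the patch:

    /* Illustrative only: mirrors plat_qemu_calc_core_pos for sbsa-ref values. */
    static unsigned int calc_core_pos_sketch(unsigned long mpidr)
    {
    	unsigned long cpu = mpidr & 0xffUL;        /* MPIDR_CPU_MASK     */
    	unsigned long cluster = mpidr & 0xff00UL;  /* MPIDR_CLUSTER_MASK */

    	/* LSR #(MPIDR_AFFINITY_BITS - PLATFORM_CPU_PER_CLUSTER_SHIFT) == LSR #(8 - 3) */
    	return (unsigned int)(cpu + (cluster >> (8 - 3)));
    }

For example, cluster 2, core 3 gives 2 * 8 + 3 = 19.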
......
@@ -26,7 +26,7 @@
 #ifdef DEVICE2_BASE
 #define MAP_DEVICE2	MAP_REGION_FLAT(DEVICE2_BASE,			\
 					DEVICE2_SIZE,			\
-					MT_DEVICE | MT_RO | MT_SECURE)
+					MT_DEVICE | MT_RW | MT_SECURE)
 #endif
 #define MAP_SHARED_RAM	MAP_REGION_FLAT(SHARED_RAM_BASE,		\
@@ -93,6 +93,9 @@ static const mmap_region_t plat_qemu_mmap[] = {
 #ifdef MAP_DEVICE1
 	MAP_DEVICE1,
 #endif
+#ifdef MAP_DEVICE2
+	MAP_DEVICE2,
+#endif
 #if SPM_MM
 	MAP_NS_DRAM0,
 	QEMU_SPM_BUF_EL3_MMAP,
@@ -108,6 +111,9 @@ static const mmap_region_t plat_qemu_mmap[] = {
 	MAP_DEVICE0,
 #ifdef MAP_DEVICE1
 	MAP_DEVICE1,
+#endif
+#ifdef MAP_DEVICE2
+	MAP_DEVICE2,
 #endif
 	{0}
 };
......
@@ -24,6 +24,14 @@
 #define PLATFORM_CLUSTER1_CORE_COUNT	U(0)
 #else
 #define PLATFORM_MAX_CPUS_PER_CLUSTER	U(4)
+/*
+ * Define the number of cores per cluster used in calculating core position.
+ * The cluster number is shifted by this value and added to the core ID,
+ * so its value represents log2(cores/cluster).
+ * Default is 2**(2) = 4 cores per cluster.
+ */
+#define PLATFORM_CPU_PER_CLUSTER_SHIFT	U(2)
 #define PLATFORM_CLUSTER_COUNT		U(2)
 #define PLATFORM_CLUSTER0_CORE_COUNT	PLATFORM_MAX_CPUS_PER_CLUSTER
 #define PLATFORM_CLUSTER1_CORE_COUNT	PLATFORM_MAX_CPUS_PER_CLUSTER
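Since PLATFORM_CPU_PER_CLUSTER_SHIFT has to stay in sync with the cores-per-cluster count, a build-time check is one way to catch a mismatch. A minimal sketch using standard C11, not part of the patch:

    /* Illustrative only: the shift is log2 of the cores-per-cluster count. */
    _Static_assert((1U << PLATFORM_CPU_PER_CLUSTER_SHIFT) == PLATFORM_MAX_CPUS_PER_CLUSTER,
    	       "PLATFORM_CPU_PER_CLUSTER_SHIFT does not match PLATFORM_MAX_CPUS_PER_CLUSTER");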
......
@@ -16,13 +16,17 @@
 #define PLATFORM_STACK_SIZE		0x1000
-#define PLATFORM_MAX_CPUS_PER_CLUSTER	U(4)
-#define PLATFORM_CLUSTER_COUNT		U(2)
-#define PLATFORM_CLUSTER0_CORE_COUNT	PLATFORM_MAX_CPUS_PER_CLUSTER
-#define PLATFORM_CLUSTER1_CORE_COUNT	PLATFORM_MAX_CPUS_PER_CLUSTER
-#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER0_CORE_COUNT + \
-					 PLATFORM_CLUSTER1_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER	U(8)
+/*
+ * Define the number of cores per cluster used in calculating core position.
+ * The cluster number is shifted by this value and added to the core ID,
+ * so its value represents log2(cores/cluster).
+ * Default is 2**(3) = 8 cores per cluster.
+ */
+#define PLATFORM_CPU_PER_CLUSTER_SHIFT	U(3)
+#define PLATFORM_CLUSTER_COUNT		U(64)
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER_COUNT * \
+					 PLATFORM_MAX_CPUS_PER_CLUSTER)
 #define QEMU_PRIMARY_CPU		U(0)
 #define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CLUSTER_COUNT + \
@@ -130,7 +134,7 @@
  * Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
  * current BL3-1 debug size plus a little space for growth.
  */
-#define BL31_SIZE			0x50000
+#define BL31_SIZE			0x300000
 #define BL31_BASE			(BL31_LIMIT - BL31_SIZE)
 #define BL31_LIMIT			(BL1_RW_BASE)
 #define BL31_PROGBITS_LIMIT		BL1_RW_BASE
@@ -157,10 +161,10 @@
 #define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 42)
 #if SPM_MM
 #define MAX_MMAP_REGIONS		12
-#define MAX_XLAT_TABLES			11
+#define MAX_XLAT_TABLES			12
 #else
 #define MAX_MMAP_REGIONS		11
-#define MAX_XLAT_TABLES			10
+#define MAX_XLAT_TABLES			11
 #endif
 #define MAX_IO_DEVICES			3
 #define MAX_IO_HANDLES			4
@@ -203,7 +207,10 @@
 #define DEVICE0_SIZE			0x04080000
 /* This is map from NORMAL_UART up to SECURE_UART_MM */
 #define DEVICE1_BASE			0x60000000
-#define DEVICE1_SIZE			0x00041000
+#define DEVICE1_SIZE			0x10041000
+/* This is a map for SECURE_EC */
+#define DEVICE2_BASE			0x50000000
+#define DEVICE2_SIZE			0x00001000
 /*
  * GIC related constants
......
@@ -79,8 +79,8 @@ BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a57.S		\
 				lib/semihosting/semihosting.c		\
 				lib/semihosting/${ARCH}/semihosting_call.S \
 				plat/common/plat_psci_common.c		\
-				${PLAT_QEMU_COMMON_PATH}/qemu_pm.c	\
-				${PLAT_QEMU_COMMON_PATH}/topology.c	\
+				${PLAT_QEMU_PATH}/sbsa_pm.c		\
+				${PLAT_QEMU_PATH}/sbsa_topology.c	\
 				${PLAT_QEMU_COMMON_PATH}/aarch64/plat_helpers.S \
 				${PLAT_QEMU_COMMON_PATH}/qemu_bl31_setup.c \
 				common/fdt_fixup.c			\
......
/*
* Copyright (c) 2020, Nuvia Inc
* Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include "sbsa_private.h"
#define ADP_STOPPED_APPLICATION_EXIT 0x20026
/*
* Define offset and commands for the fake EC device
*/
#define SBSA_SECURE_EC_OFFSET 0x50000000
#define SBSA_SECURE_EC_CMD_SHUTDOWN 0x01
#define SBSA_SECURE_EC_CMD_REBOOT 0x02
/*
* The secure entry point to be used on warm reset.
*/
static unsigned long secure_entrypoint;
/* Make composite power state parameter till power level 0 */
#if PSCI_EXTENDED_STATE_ID
#define qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
(((lvl0_state) << PSTATE_ID_SHIFT) | \
((type) << PSTATE_TYPE_SHIFT))
#else
#define qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
(((lvl0_state) << PSTATE_ID_SHIFT) | \
((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
((type) << PSTATE_TYPE_SHIFT))
#endif /* PSCI_EXTENDED_STATE_ID */
#define qemu_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
(((lvl1_state) << PLAT_LOCAL_PSTATE_WIDTH) | \
qemu_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
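/*
 * Example encoding: with PLAT_LOCAL_STATE_OFF == 2 and
 * PLAT_LOCAL_PSTATE_WIDTH == 4, a full cluster power-down state is
 * (2 << 4) | 2 == 0x22, which is the last State-id listed below.
 */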
/*
* The table storing the valid idle power states. Ensure that the
* array entries are populated in ascending order of state-id to
* enable us to use binary search during power state validation.
* The table must be terminated by a NULL entry.
*/
static const unsigned int qemu_pm_idle_states[] = {
/* State-id - 0x01 */
qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_RUN, PLAT_LOCAL_STATE_RET,
MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY),
/* State-id - 0x02 */
qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_RUN, PLAT_LOCAL_STATE_OFF,
MPIDR_AFFLVL0, PSTATE_TYPE_POWERDOWN),
/* State-id - 0x22 */
qemu_make_pwrstate_lvl1(PLAT_LOCAL_STATE_OFF, PLAT_LOCAL_STATE_OFF,
MPIDR_AFFLVL1, PSTATE_TYPE_POWERDOWN),
0
};
/*******************************************************************************
* Platform handler called to check the validity of the power state
* parameter. The power state parameter has to be a composite power state.
******************************************************************************/
static int qemu_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
unsigned int state_id;
unsigned int i;
assert(req_state != NULL);
/*
* Currently we are using a linear search for finding the matching
* entry in the idle power state array. This can be made a binary
* search if the number of entries justifies the additional complexity.
*/
for (i = 0U; qemu_pm_idle_states[i] != 0U; i++) {
if (power_state == qemu_pm_idle_states[i]) {
break;
}
}
/* Return error if entry not found in the idle state array */
if (qemu_pm_idle_states[i] == 0U) {
return PSCI_E_INVALID_PARAMS;
}
i = 0U;
state_id = psci_get_pstate_id(power_state);
/* Parse the State ID and populate the state info parameter */
while (state_id != 0U) {
req_state->pwr_domain_state[i++] = state_id &
PLAT_LOCAL_PSTATE_MASK;
state_id >>= PLAT_LOCAL_PSTATE_WIDTH;
}
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Platform handler called when a CPU is about to enter standby.
******************************************************************************/
static void qemu_cpu_standby(plat_local_state_t cpu_state)
{
assert(cpu_state == PLAT_LOCAL_STATE_RET);
/*
* Enter standby state
* dsb is good practice before using wfi to enter low power states
*/
dsb();
wfi();
}
/*******************************************************************************
* Platform handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
static int qemu_pwr_domain_on(u_register_t mpidr)
{
int pos = plat_core_pos_by_mpidr(mpidr);
uint64_t *hold_base = (uint64_t *)PLAT_QEMU_HOLD_BASE;
if (pos < 0) {
return PSCI_E_INVALID_PARAMS;
}
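/*
 * Release the target CPU from the hold pen: secondaries spin in
 * plat_secondary_cold_boot_setup() until their hold entry is set to GO,
 * then fetch the warm-boot entrypoint from the trusted mailbox.
 */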
hold_base[pos] = PLAT_QEMU_HOLD_STATE_GO;
dsb();
sev();
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Platform handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
static void qemu_pwr_domain_off(const psci_power_state_t *target_state)
{
qemu_pwr_gic_off();
}
void __dead2 plat_secondary_cold_boot_setup(void);
static void __dead2
qemu_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
{
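/*
 * QEMU cannot physically power a core off, so instead of waiting in wfi
 * the CPU re-enters the secondary cold-boot path and parks in the hold
 * pen until it is turned on again.
 */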
disable_mmu_el3();
plat_secondary_cold_boot_setup();
}
/*******************************************************************************
* Platform handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void qemu_pwr_domain_suspend(const psci_power_state_t *target_state)
{
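/* Suspend is not supported on sbsa-ref; this handler must not be reached. */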
assert(false);
}
/*******************************************************************************
* Platform handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
void qemu_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
PLAT_LOCAL_STATE_OFF);
qemu_pwr_gic_on_finish();
}
/*******************************************************************************
* Platform handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
******************************************************************************/
void qemu_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
assert(false);
}
/*******************************************************************************
* Platform handlers to shutdown/reboot the system
******************************************************************************/
static void __dead2 qemu_system_off(void)
{
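/*
 * Ask the secure EC emulated by sbsa-ref to shut the machine down;
 * execution should never reach the panic() below.
 */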
mmio_write_32(SBSA_SECURE_EC_OFFSET, SBSA_SECURE_EC_CMD_SHUTDOWN);
panic();
}
static void __dead2 qemu_system_reset(void)
{
mmio_write_32(SBSA_SECURE_EC_OFFSET, SBSA_SECURE_EC_CMD_REBOOT);
panic();
}
static const plat_psci_ops_t plat_qemu_psci_pm_ops = {
.cpu_standby = qemu_cpu_standby,
.pwr_domain_on = qemu_pwr_domain_on,
.pwr_domain_off = qemu_pwr_domain_off,
.pwr_domain_pwr_down_wfi = qemu_pwr_domain_pwr_down_wfi,
.pwr_domain_suspend = qemu_pwr_domain_suspend,
.pwr_domain_on_finish = qemu_pwr_domain_on_finish,
.pwr_domain_suspend_finish = qemu_pwr_domain_suspend_finish,
.system_off = qemu_system_off,
.system_reset = qemu_system_reset,
.validate_power_state = qemu_validate_power_state
};
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
uintptr_t *mailbox = (uintptr_t *)PLAT_QEMU_TRUSTED_MAILBOX_BASE;
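/*
 * Record the warm-boot entrypoint in the trusted mailbox; CPUs released
 * from the hold pen read their jump address from here.
 */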
*mailbox = sec_entrypoint;
secure_entrypoint = (unsigned long)sec_entrypoint;
*psci_ops = &plat_qemu_psci_pm_ops;
return 0;
}
/*
* Copyright (c) 2020, Nuvia Inc
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SBSA_PRIVATE_H
#define SBSA_PRIVATE_H
#include <stdint.h>
unsigned int plat_qemu_calc_core_pos(u_register_t mpidr);
void qemu_pwr_gic_on_finish(void);
void qemu_pwr_gic_off(void);
#endif /* SBSA_PRIVATE_H */
/*
* Copyright (c) 2020, Nuvia Inc
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <common/debug.h>
#include <platform_def.h>
#include "sbsa_private.h"
/* The power domain tree descriptor */
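/*
 * Layout: entry 0 is the number of clusters; entries 1..PLATFORM_CLUSTER_COUNT
 * give the CPU count of each cluster.
 */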
static unsigned char power_domain_tree_desc[PLATFORM_CLUSTER_COUNT + 1];
/*******************************************************************************
* This function returns the sbsa-ref default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
unsigned int i;
power_domain_tree_desc[0] = PLATFORM_CLUSTER_COUNT;
for (i = 0U; i < PLATFORM_CLUSTER_COUNT; i++) {
power_domain_tree_desc[i + 1] = PLATFORM_MAX_CPUS_PER_CLUSTER;
}
return power_domain_tree_desc;
}
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform that allows the former to query the platform
* to convert an MPIDR to a unique linear index. An error code (-1) is returned
* in case the MPIDR is invalid.
******************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned int cluster_id, cpu_id;
mpidr &= MPIDR_AFFINITY_MASK;
if ((mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) != 0U) {
ERROR("Invalid MPIDR\n");
return -1;
}
cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
if (cluster_id >= PLATFORM_CLUSTER_COUNT) {
ERROR("cluster_id >= PLATFORM_CLUSTER_COUNT define\n");
return -1;
}
if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER) {
ERROR("cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER define\n");
return -1;
}
return plat_qemu_calc_core_pos(mpidr);
}