Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
Menu
Open sidebar
adam.huang
Arm Trusted Firmware
Commits
ba0248b5
Unverified
Commit
ba0248b5
authored
Jul 19, 2018
by
danh-arm
Committed by
GitHub
Jul 19, 2018
Browse files
Merge pull request #1450 from MISL-EBU-System-SW/marvell-support-v6
Marvell support for Armada 8K SoC family
parents
992a3536
23e0fe52
Changes
116
Hide whitespace changes
Inline
Side-by-side
plat/marvell/a8k/common/mss/mss_a8k.mk
0 → 100644
View file @
ba0248b5
#
# Copyright (C) 2018 Marvell International Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
# https://spdx.org/licenses
#

PLAT_MARVELL		:=	plat/marvell
A8K_MSS_SOURCE		:=	$(PLAT_MARVELL)/a8k/common/mss

BL2_SOURCES		+=	$(A8K_MSS_SOURCE)/mss_bl2_setup.c

BL31_SOURCES		+=	$(A8K_MSS_SOURCE)/mss_pm_ipc.c

PLAT_INCLUDES		+=	-I$(A8K_MSS_SOURCE)

ifneq (${SCP_BL2},)
# This define is used to indicate the SCP image is present
$(eval $(call add_define,SCP_IMAGE))
endif
plat/marvell/a8k/common/mss/mss_bl2_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <a8k_common.h>
#include <bl_common.h>
#include <ccu.h>
#include <cp110_setup.h>
#include <debug.h>
#include <marvell_plat_priv.h>
/* timer functionality */
#include <mmio.h>
#include <platform_def.h>
#include "mss_scp_bootloader.h"
/* IO windows configuration */
#define IOW_GCR_OFFSET (0x70)
/* MSS windows configuration */
#define MSS_AEBR(base) (base + 0x160)
#define MSS_AIBR(base) (base + 0x164)
#define MSS_AEBR_MASK 0xFFF
#define MSS_AIBR_MASK 0xFFF
#define MSS_EXTERNAL_SPACE 0x50000000
#define MSS_EXTERNAL_ACCESS_BIT 28
#define MSS_EXTERNAL_ADDR_MASK 0xfffffff
#define MSS_INTERNAL_ACCESS_BIT 28
/* Extra CCU window needed by BL2: opens access to the CP0 register
 * space (64MB at MVEBU_CP_REGS_BASE(0)) via the IO-0 target.
 */
struct addr_map_win ccu_mem_map[] = {
	{MVEBU_CP_REGS_BASE(0), 0x4000000, IO_0_TID}
};
/* Since the scp_bl2 image can contain firmware for cp1 and cp0 coprocessors,
* the access to cp0 and cp1 need to be provided. More precisely it is
* required to:
* - get the information about device id which is stored in CP0 registers
* (to distinguish between cases where we have cp0 and cp1 or standalone cp0)
* - get the access to cp which is needed for loading fw for cp0/cp1
* coprocessors
* This function configures ccu windows accordingly.
*
* Note: there is no need to restore previous ccu configuration, since in next
* phase (BL31) the init_ccu will be called (via apn806_init/
* bl31_plat_arch_setu) and therefore the ccu configuration will be overwritten.
*/
static
int
bl2_plat_mmap_init
(
void
)
{
int
cfg_num
,
win_id
,
cfg_idx
;
cfg_num
=
ARRAY_SIZE
(
ccu_mem_map
);
/* CCU window-0 should not be counted - it's already used */
if
(
cfg_num
>
(
MVEBU_CCU_MAX_WINS
-
1
))
{
ERROR
(
"BL2: %s: trying to open too many windows
\n
"
,
__func__
);
return
-
1
;
}
/* Enable required CCU windows
* Do not touch CCU window 0,
* it's used for the internal registers access
*/
for
(
cfg_idx
=
0
,
win_id
=
1
;
cfg_idx
<
cfg_num
;
cfg_idx
++
,
win_id
++
)
{
/* Enable required CCU windows */
ccu_win_check
(
&
ccu_mem_map
[
cfg_idx
]);
ccu_enable_win
(
MVEBU_AP0
,
&
ccu_mem_map
[
cfg_idx
],
win_id
);
}
/* Set the default target id to PIDI */
mmio_write_32
(
MVEBU_IO_WIN_BASE
(
MVEBU_AP0
)
+
IOW_GCR_OFFSET
,
PIDI_TID
);
return
0
;
}
/*****************************************************************************
 * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
 * Return 0 on success, -1 otherwise.
 *****************************************************************************
 */
int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	int rc;

	/* Message is emitted via printf as well so it shows up even
	 * when the INFO log level is compiled out.
	 */
	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");
	printf("BL2: Initiating SCP_BL2 transfer to SCP\n");

	/* initialize time (for delay functionality) */
	plat_delay_timer_init();

	rc = bl2_plat_mmap_init();
	if (rc != 0)
		return rc;

	rc = scp_bootloader_transfer((void *)scp_bl2_image_info->image_base,
				     scp_bl2_image_info->image_size);
	if (rc == 0)
		INFO("BL2: SCP_BL2 transferred to SCP\n");
	else
		ERROR("BL2: SCP_BL2 transfer failure\n");

	return rc;
}
/* Return the MSS register base of the given CP.
 * The MSS register file sits at a fixed 0x280000 offset inside the
 * CP register space; ap_idx is unused on this platform.
 */
uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx)
{
	return MVEBU_CP_REGS_BASE(cp_idx) + 0x280000;
}
/* Return the AP MSS register base (fixed 0x580000 offset from the
 * SoC register base); ap_idx is unused on this platform.
 */
uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx)
{
	return MVEBU_REGS_BASE + 0x580000;
}
/* Return the number of CP110 dies connected to the AP, based on the
 * device id read from CP0 registers.
 */
uint32_t bl2_plat_get_cp_count(int ap_idx)
{
	uint32_t device_id = cp110_device_id_get(MVEBU_CP_REGS_BASE(0));

	/* A8040: two CPs.
	 * A7040: one CP.
	 */
	if (device_id == MVEBU_80X0_DEV_ID ||
	    device_id == MVEBU_80X0_CP115_DEV_ID)
		return 2;

	return 1;
}
/* Number of APs in the system; A8040 and A7040 have only one AP. */
uint32_t bl2_plat_get_ap_count(void)
{
	return 1;
}
/* Program the MSS AXI address-bus extension registers so the MSS can
 * reach the external and internal address spaces.
 */
void bl2_plat_configure_mss_windows(uintptr_t mss_regs)
{
	/* set AXI External and Internal Address Bus extension */
	uint32_t aebr = (0x0 >> MSS_EXTERNAL_ACCESS_BIT) & MSS_AEBR_MASK;
	uint32_t aibr = (mss_regs >> MSS_INTERNAL_ACCESS_BIT) & MSS_AIBR_MASK;

	mmio_write_32(MSS_AEBR(mss_regs), aebr);
	mmio_write_32(MSS_AIBR(mss_regs), aibr);
}
plat/marvell/a8k/common/mss/mss_pm_ipc.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <mmio.h>
#include <psci.h>
#include <string.h>
#include <mss_pm_ipc.h>
/*
* SISR is 32 bit interrupt register representing 32 interrupts
*
* +======+=============+=============+
* + Bits + 31 + 30 - 00 +
* +======+=============+=============+
* + Desc + MSS Msg Int + Reserved +
* +======+=============+=============+
*/
#define MSS_SISR (MVEBU_REGS_BASE + 0x5800D0)
#define MSS_SISTR (MVEBU_REGS_BASE + 0x5800D8)
#define MSS_MSG_INT_MASK (0x80000000)
#define MSS_TIMER_BASE (MVEBU_REGS_BASE_MASK + 0x580110)
#define MSS_TRIGGER_TIMEOUT (1000)
/*****************************************************************************
 * mss_pm_ipc_msg_send
 *
 * DESCRIPTION: create and transmit IPC message
 *****************************************************************************
 */
int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
			const psci_power_state_t *target_state)
{
	/* Transmit IPC message; the cluster-level power state is only
	 * forwarded when cluster power down is supported by the MSS.
	 */
#ifndef DISABLE_CLUSTER_LEVEL
	mv_pm_ipc_msg_tx(channel_id, msg_id,
			 (unsigned int)
			 target_state->pwr_domain_state[MPIDR_AFFLVL1]);
#else
	mv_pm_ipc_msg_tx(channel_id, msg_id, 0);
#endif

	return 0;
}
/*****************************************************************************
* mss_pm_ipc_msg_trigger
*
* DESCRIPTION: Trigger IPC message interrupt to MSS
*****************************************************************************
*/
int
mss_pm_ipc_msg_trigger
(
void
)
{
unsigned
int
timeout
;
unsigned
int
t_end
;
unsigned
int
t_start
=
mmio_read_32
(
MSS_TIMER_BASE
);
mmio_write_32
(
MSS_SISR
,
MSS_MSG_INT_MASK
);
do
{
/* wait while SCP process incoming interrupt */
if
(
mmio_read_32
(
MSS_SISTR
)
!=
MSS_MSG_INT_MASK
)
break
;
/* check timeout */
t_end
=
mmio_read_32
(
MSS_TIMER_BASE
);
timeout
=
((
t_start
>
t_end
)
?
(
t_start
-
t_end
)
:
(
t_end
-
t_start
));
if
(
timeout
>
MSS_TRIGGER_TIMEOUT
)
{
ERROR
(
"PM MSG Trigger Timeout
\n
"
);
break
;
}
}
while
(
1
);
return
0
;
}
plat/marvell/a8k/common/mss/mss_pm_ipc.h
0 → 100644
View file @
ba0248b5
/*
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * https://spdx.org/licenses
 */

#ifndef __MSS_PM_IPC_H
#define __MSS_PM_IPC_H

#include <mss_ipc_drv.h>

/* Currently MSS does not support Cluster level Power Down */
#define DISABLE_CLUSTER_LEVEL

/*****************************************************************************
 * mss_pm_ipc_msg_send
 *
 * DESCRIPTION: create and transmit IPC message
 *****************************************************************************
 */
int mss_pm_ipc_msg_send(unsigned int channel_id, unsigned int msg_id,
			const psci_power_state_t *target_state);

/*****************************************************************************
 * mss_pm_ipc_msg_trigger
 *
 * DESCRIPTION: Trigger IPC message interrupt to MSS
 *****************************************************************************
 */
int mss_pm_ipc_msg_trigger(void);

#endif /* __MSS_PM_IPC_H */
plat/marvell/a8k/common/plat_bl1_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <mmio.h>
#include <plat_marvell.h>
/* Route the UART pins via the AP MPP registers.
 * In a normal system this is done by the BootROM; BL1 repeats it so the
 * console works even when the BootROM did not run the full flow.
 */
void marvell_bl1_setup_mpps(void)
{
	mmio_write_32(MVEBU_AP_MPP_REGS(1), 0x3000);
	mmio_write_32(MVEBU_AP_MPP_REGS(2), 0x3000);
}
plat/marvell/a8k/common/plat_bl31_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <a8k_common.h>
#include <ap_setup.h>
#include <cp110_setup.h>
#include <debug.h>
#include <marvell_plat_priv.h>
#include <marvell_pm.h>
#include <mmio.h>
#include <mci.h>
#include <plat_marvell.h>
#include <mss_ipc_drv.h>
#include <mss_mem.h>
/* In Armada-8k family AP806/AP807, CP0 connected to PIDI
* and CP1 connected to IHB via MCI #0
*/
#define MVEBU_MCI0 0
static
_Bool
pm_fw_running
;
/* Weak default stub for platforms that don't need to configure GPIO;
 * boards override this symbol when GPIO setup is required.
 */
#pragma weak marvell_gpio_config
int marvell_gpio_config(void)
{
	return 0;
}
/* Enable the CP0 I2C MPP pins; CPs other than 0 need no MPP changes. */
static void marvell_bl31_mpp_init(int cp)
{
	uint32_t mpp_val;

	/* need to do for CP#0 only */
	if (cp != 0)
		return;

	/*
	 * Enable CP0 I2C MPPs (MPP: 37-38)
	 * U-Boot relies on proper MPP settings for I2C EEPROM usage
	 * (only for CP0)
	 */
	mpp_val = mmio_read_32(MVEBU_CP_MPP_REGS(0, 4));
	mmio_write_32(MVEBU_CP_MPP_REGS(0, 4), mpp_val | 0x2200000);
}
/* Initialize the IPC channel to the MSS PM firmware, if the SCP image
 * was loaded and acknowledged the handshake.
 */
void marvell_bl31_mss_init(void)
{
	struct mss_pm_ctrl_block *pm_ctrl =
		(struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;

	/* Check that the image was loaded successfully */
	if (pm_ctrl->handshake != HOST_ACKNOWLEDGMENT) {
		NOTICE("MSS PM is not supported in this build\n");
		return;
	}

	/* If we got here it means that the PM firmware is running */
	pm_fw_running = 1;

	INFO("MSS IPC init\n");

	if (pm_ctrl->ipc_state == IPC_INITIALIZED)
		mv_pm_ipc_init(pm_ctrl->ipc_base_address | MVEBU_REGS_BASE);
}
/* Report whether the MSS PM firmware was detected by marvell_bl31_mss_init() */
_Bool is_pm_fw_running(void)
{
	return pm_fw_running;
}
/* This function overrides the implementation in marvell_bl31_setup.c */
void bl31_plat_arch_setup(void)
{
	int cp;
	uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

	/* initialize the timer for mdelay/udelay functionality */
	plat_delay_timer_init();

	/* configure apn806 */
	ap_init();

	/* In marvell_bl31_plat_arch_setup, el3 mmu is configured.
	 * el3 mmu configuration MUST be called after apn806_init, if not,
	 * this will cause a hang in init_io_win
	 * (after setting the IO windows GCR values).
	 * Skip it when resuming from suspend (mailbox magic values match).
	 */
	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
	    mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
		marvell_bl31_plat_arch_setup();

	for (cp = 0; cp < CP_COUNT; cp++) {
		/* MCI is only used to reach CP1, so bring it up before
		 * touching that CP (CP0 is connected directly).
		 */
		if (cp == 1)
			mci_initialize(MVEBU_MCI0);

		/* configure the cp110 and assign its stream-id range */
		cp110_init(MVEBU_CP_REGS_BASE(cp),
			   STREAM_ID_BASE + (cp * MAX_STREAM_ID_PER_CP));

		/* Should be called only after setting IOB windows */
		marvell_bl31_mpp_init(cp);
	}

	/* initialize IPC between MSS and ATF */
	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM ||
	    mailbox[MBOX_IDX_SUSPEND_MAGIC] != MVEBU_MAILBOX_SUSPEND_STATE)
		marvell_bl31_mss_init();

	/* Configure GPIO */
	marvell_gpio_config();
}
plat/marvell/a8k/common/plat_ble_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <a8k_common.h>
#include <ap_setup.h>
#include <aro.h>
#include <ccu.h>
#include <cp110_setup.h>
#include <debug.h>
#include <io_win.h>
#include <mv_ddr_if.h>
#include <mvebu_def.h>
#include <plat_marvell.h>
/* Register for skip image use */
#define SCRATCH_PAD_REG2 0xF06F00A8
#define SCRATCH_PAD_SKIP_VAL 0x01
#define NUM_OF_GPIO_PER_REG 32
#define MMAP_SAVE_AND_CONFIG 0
#define MMAP_RESTORE_SAVED 1
/* SAR clock settings */
#define MVEBU_AP_GEN_MGMT_BASE (MVEBU_RFU_BASE + 0x8000)
#define MVEBU_AP_SAR_REG_BASE(r) (MVEBU_AP_GEN_MGMT_BASE + 0x200 +\
((r) << 2))
#define SAR_CLOCK_FREQ_MODE_OFFSET (0)
#define SAR_CLOCK_FREQ_MODE_MASK (0x1f << SAR_CLOCK_FREQ_MODE_OFFSET)
#define SAR_PIDI_LOW_SPEED_OFFSET (20)
#define SAR_PIDI_LOW_SPEED_MASK (1 << SAR_PIDI_LOW_SPEED_OFFSET)
#define SAR_PIDI_LOW_SPEED_SHIFT (15)
#define SAR_PIDI_LOW_SPEED_SET (1 << SAR_PIDI_LOW_SPEED_SHIFT)
#define FREQ_MODE_AP_SAR_REG_NUM (0)
#define SAR_CLOCK_FREQ_MODE(v) (((v) & SAR_CLOCK_FREQ_MODE_MASK) >> \
SAR_CLOCK_FREQ_MODE_OFFSET)
#define AVS_EN_CTRL_REG (MVEBU_AP_GEN_MGMT_BASE + 0x130)
#define AVS_ENABLE_OFFSET (0)
#define AVS_SOFT_RESET_OFFSET (2)
#define AVS_LOW_VDD_LIMIT_OFFSET (4)
#define AVS_HIGH_VDD_LIMIT_OFFSET (12)
#define AVS_TARGET_DELTA_OFFSET (21)
#define AVS_VDD_LOW_LIMIT_MASK (0xFF << AVS_LOW_VDD_LIMIT_OFFSET)
#define AVS_VDD_HIGH_LIMIT_MASK (0xFF << AVS_HIGH_VDD_LIMIT_OFFSET)
/* VDD limit is 0.9V for A70x0 @ CPU frequency < 1600MHz */
#define AVS_A7K_LOW_CLK_VALUE ((0x80 << AVS_TARGET_DELTA_OFFSET) | \
(0x1A << AVS_HIGH_VDD_LIMIT_OFFSET) | \
(0x1A << AVS_LOW_VDD_LIMIT_OFFSET) | \
(0x1 << AVS_SOFT_RESET_OFFSET) | \
(0x1 << AVS_ENABLE_OFFSET))
/* VDD limit is 1.0V for all A80x0 devices */
#define AVS_A8K_CLK_VALUE ((0x80 << AVS_TARGET_DELTA_OFFSET) | \
(0x24 << AVS_HIGH_VDD_LIMIT_OFFSET) | \
(0x24 << AVS_LOW_VDD_LIMIT_OFFSET) | \
(0x1 << AVS_SOFT_RESET_OFFSET) | \
(0x1 << AVS_ENABLE_OFFSET))
#define AVS_A3900_CLK_VALUE ((0x80 << 24) | \
(0x2c2 << 13) | \
(0x2c2 << 3) | \
(0x1 << AVS_SOFT_RESET_OFFSET) | \
(0x1 << AVS_ENABLE_OFFSET))
#define MVEBU_AP_EFUSE_SRV_CTRL_REG (MVEBU_AP_GEN_MGMT_BASE + 0x8)
#define EFUSE_SRV_CTRL_LD_SELECT_OFFS 6
#define EFUSE_SRV_CTRL_LD_SEL_USER_MASK (1 << EFUSE_SRV_CTRL_LD_SELECT_OFFS)
/* Notify bootloader on DRAM setup */
#define AP807_CPU_ARO_0_CTRL_0 (MVEBU_RFU_BASE + 0x82A8)
#define AP807_CPU_ARO_1_CTRL_0 (MVEBU_RFU_BASE + 0x8D00)
/* 0 - ARO clock is enabled, 1 - ARO clock is disabled */
#define AP807_CPU_ARO_CLK_EN_OFFSET 0
#define AP807_CPU_ARO_CLK_EN_MASK (0x1 << AP807_CPU_ARO_CLK_EN_OFFSET)
/* 0 - ARO is the clock source, 1 - PLL is the clock source */
#define AP807_CPU_ARO_SEL_PLL_OFFSET 5
#define AP807_CPU_ARO_SEL_PLL_MASK (0x1 << AP807_CPU_ARO_SEL_PLL_OFFSET)
/*
* - AVS work points in the LD0 eFuse:
* SVC1 work point: LD0[88:81]
* SVC2 work point: LD0[96:89]
* SVC3 work point: LD0[104:97]
* SVC4 work point: LD0[112:105]
* - Identification information in the LD-0 eFuse:
* DRO: LD0[74:65] - Not used by the SW
* Revision: LD0[78:75] - Not used by the SW
* Bin: LD0[80:79] - Not used by the SW
* SW Revision: LD0[115:113]
* Cluster 1 PWR: LD0[193] - if set to 1, power down CPU Cluster-1
* resulting in 2 CPUs active only (7020)
*/
#define MVEBU_AP_LD_EFUSE_BASE (MVEBU_AP_GEN_MGMT_BASE + 0xF00)
/* Bits [94:63] - 32 data bits total */
#define MVEBU_AP_LD0_94_63_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0x8)
/* Bits [125:95] - 31 data bits total, 32nd bit is parity for bits [125:63] */
#define MVEBU_AP_LD0_125_95_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0xC)
/* Bits [220:189] - 32 data bits total */
#define MVEBU_AP_LD0_220_189_EFUSE_OFFS (MVEBU_AP_LD_EFUSE_BASE + 0x18)
/* Offsets for the above 2 fields combined into single 64-bit value [125:63] */
#define EFUSE_AP_LD0_DRO_OFFS 2
/* LD0[74:65] */
#define EFUSE_AP_LD0_DRO_MASK 0x3FF
#define EFUSE_AP_LD0_REVID_OFFS 12
/* LD0[78:75] */
#define EFUSE_AP_LD0_REVID_MASK 0xF
#define EFUSE_AP_LD0_BIN_OFFS 16
/* LD0[80:79] */
#define EFUSE_AP_LD0_BIN_MASK 0x3
#define EFUSE_AP_LD0_SWREV_OFFS 50
/* LD0[115:113] */
#define EFUSE_AP_LD0_SWREV_MASK 0x7
#define EFUSE_AP_LD0_SVC1_OFFS 18
/* LD0[88:81] */
#define EFUSE_AP_LD0_SVC2_OFFS 26
/* LD0[96:89] */
#define EFUSE_AP_LD0_SVC3_OFFS 34
/* LD0[104:97] */
#define EFUSE_AP_LD0_SVC4_OFFS 42
/* LD0[112:105] */
#define EFUSE_AP_LD0_WP_MASK 0xFF
#define EFUSE_AP_LD0_CLUSTER_DOWN_OFFS 4
/* Return the AP revision of the chip */
static unsigned int ble_get_ap_type(void)
{
	unsigned int chip_rev_id = mmio_read_32(MVEBU_CSS_GWD_CTRL_IIDR2_REG);

	return (chip_rev_id & GWD_IIDR2_CHIP_ID_MASK) >>
	       GWD_IIDR2_CHIP_ID_OFFSET;
}
/******************************************************************************
 * The routine allows to save the CCU and IO windows configuration during DRAM
 * setup and restore them afterwards before exiting the BLE stage.
 * Such window configuration is required since not all default settings coming
 * from the HW and the BootROM allow access to peripherals connected to
 * all available CPn components.
 * For instance, when the boot device is located on CP0, the IO window to CP1
 * is not opened automatically by the HW and if the DRAM SPD is located on CP1
 * i2c channel, it cannot be read at BLE stage.
 * Therefore the DRAM init procedure has to provide access to all available
 * CPn peripherals during the BLE stage by setting the CCU IO window to all
 * CPnph addresses and by enabling the IO windows accordingly.
 * Additionally this function configures the CCU GCR to DRAM, which allows
 * usage of more than 4GB DRAM as it is configured by the default CCU DRAM
 * window.
 *
 * IN:
 *	MMAP_SAVE_AND_CONFIG - save the existing configuration and update it
 *	MMAP_RESTORE_SAVED   - restore saved configuration
 * OUT:
 *	NONE
 ****************************************************************************
 */
static void ble_plat_mmap_config(int restore)
{
	if (restore == MMAP_RESTORE_SAVED) {
		/* Restore the CCU windows modified by the BLE stage */
		ccu_restore_win_all(MVEBU_AP0);
		/* Restore the IO windows modified by the BLE stage */
		iow_restore_win_all(MVEBU_AP0);
		return;
	}

	/* Store the original CCU window configuration */
	ccu_save_win_all(MVEBU_AP0);
	/* Store the original IO window configuration */
	iow_save_win_all(MVEBU_AP0);

	/* The configuration saved, now all the changes can be done */
	init_ccu(MVEBU_AP0);
	init_io_win(MVEBU_AP0);
}
/****************************************************************************
* Setup Adaptive Voltage Switching - this is required for some platforms
****************************************************************************
*/
static
void
ble_plat_avs_config
(
void
)
{
uint32_t
reg_val
,
device_id
;
/* Check which SoC is running and act accordingly */
if
(
ble_get_ap_type
()
==
CHIP_ID_AP807
)
{
VERBOSE
(
"AVS: Setting AP807 AVS CTRL to 0x%x
\n
"
,
AVS_A3900_CLK_VALUE
);
mmio_write_32
(
AVS_EN_CTRL_REG
,
AVS_A3900_CLK_VALUE
);
return
;
}
/* Check which SoC is running and act accordingly */
device_id
=
cp110_device_id_get
(
MVEBU_CP_REGS_BASE
(
0
));
switch
(
device_id
)
{
case
MVEBU_80X0_DEV_ID
:
case
MVEBU_80X0_CP115_DEV_ID
:
/* Set the new AVS value - fix the default one on A80x0 */
mmio_write_32
(
AVS_EN_CTRL_REG
,
AVS_A8K_CLK_VALUE
);
break
;
case
MVEBU_70X0_DEV_ID
:
case
MVEBU_70X0_CP115_DEV_ID
:
/* Only fix AVS for CPU clocks lower than 1600MHz on A70x0 */
reg_val
=
mmio_read_32
(
MVEBU_AP_SAR_REG_BASE
(
FREQ_MODE_AP_SAR_REG_NUM
));
reg_val
&=
SAR_CLOCK_FREQ_MODE_MASK
;
reg_val
>>=
SAR_CLOCK_FREQ_MODE_OFFSET
;
if
((
reg_val
>
CPU_1600_DDR_900_RCLK_900_2
)
&&
(
reg_val
<
CPU_DDR_RCLK_INVALID
))
mmio_write_32
(
AVS_EN_CTRL_REG
,
AVS_A7K_LOW_CLK_VALUE
);
break
;
default:
ERROR
(
"Unsupported Device ID 0x%x
\n
"
,
device_id
);
}
}
/****************************************************************************
* SVC flow - v0.10
* The feature is intended to configure AVS value according to eFuse values
* that are burned individually for each SoC during the test process.
* Primary AVS value is stored in HD efuse and processed on power on
* by the HW engine
* Secondary AVS value is located in LD efuse and contains 4 work points for
* various CPU frequencies.
* The Secondary AVS value is only taken into account if the SW Revision stored
* in the efuse is greater than 0 and the CPU is running in a certain speed.
****************************************************************************
*/
static
void
ble_plat_svc_config
(
void
)
{
uint32_t
reg_val
,
avs_workpoint
,
freq_pidi_mode
;
uint64_t
efuse
;
uint32_t
device_id
,
single_cluster
;
uint8_t
svc
[
4
],
perr
[
4
],
i
,
sw_ver
;
/* Due to a bug in A3900 device_id skip SVC config
* TODO: add SVC config once it is decided for a3900
*/
if
(
ble_get_ap_type
()
==
CHIP_ID_AP807
)
{
NOTICE
(
"SVC: SVC is not supported on AP807
\n
"
);
ble_plat_avs_config
();
return
;
}
/* Set access to LD0 */
reg_val
=
mmio_read_32
(
MVEBU_AP_EFUSE_SRV_CTRL_REG
);
reg_val
&=
~
EFUSE_SRV_CTRL_LD_SELECT_OFFS
;
mmio_write_32
(
MVEBU_AP_EFUSE_SRV_CTRL_REG
,
reg_val
);
/* Obtain the value of LD0[125:63] */
efuse
=
mmio_read_32
(
MVEBU_AP_LD0_125_95_EFUSE_OFFS
);
efuse
<<=
32
;
efuse
|=
mmio_read_32
(
MVEBU_AP_LD0_94_63_EFUSE_OFFS
);
/* SW Revision:
* Starting from SW revision 1 the SVC flow is supported.
* SW version 0 (efuse not programmed) should follow the
* regular AVS update flow.
*/
sw_ver
=
(
efuse
>>
EFUSE_AP_LD0_SWREV_OFFS
)
&
EFUSE_AP_LD0_SWREV_MASK
;
if
(
sw_ver
<
1
)
{
NOTICE
(
"SVC: SW Revision 0x%x. SVC is not supported
\n
"
,
sw_ver
);
ble_plat_avs_config
();
return
;
}
/* Frequency mode from SAR */
freq_pidi_mode
=
SAR_CLOCK_FREQ_MODE
(
mmio_read_32
(
MVEBU_AP_SAR_REG_BASE
(
FREQ_MODE_AP_SAR_REG_NUM
)));
/* Decode all SVC work points */
svc
[
0
]
=
(
efuse
>>
EFUSE_AP_LD0_SVC1_OFFS
)
&
EFUSE_AP_LD0_WP_MASK
;
svc
[
1
]
=
(
efuse
>>
EFUSE_AP_LD0_SVC2_OFFS
)
&
EFUSE_AP_LD0_WP_MASK
;
svc
[
2
]
=
(
efuse
>>
EFUSE_AP_LD0_SVC3_OFFS
)
&
EFUSE_AP_LD0_WP_MASK
;
svc
[
3
]
=
(
efuse
>>
EFUSE_AP_LD0_SVC4_OFFS
)
&
EFUSE_AP_LD0_WP_MASK
;
INFO
(
"SVC: Efuse WP: [0]=0x%x, [1]=0x%x, [2]=0x%x, [3]=0x%x
\n
"
,
svc
[
0
],
svc
[
1
],
svc
[
2
],
svc
[
3
]);
/* Validate parity of SVC workpoint values */
for
(
i
=
0
;
i
<
4
;
i
++
)
{
uint8_t
parity
,
bit
;
perr
[
i
]
=
0
;
for
(
bit
=
1
,
parity
=
svc
[
i
]
&
1
;
bit
<
7
;
bit
++
)
parity
^=
(
svc
[
i
]
>>
bit
)
&
1
;
/* Starting from SW version 2, the parity check is mandatory */
if
((
sw_ver
>
1
)
&&
(
parity
!=
((
svc
[
i
]
>>
7
)
&
1
)))
perr
[
i
]
=
1
;
/* register the error */
}
single_cluster
=
mmio_read_32
(
MVEBU_AP_LD0_220_189_EFUSE_OFFS
);
single_cluster
=
(
single_cluster
>>
EFUSE_AP_LD0_CLUSTER_DOWN_OFFS
)
&
1
;
device_id
=
cp110_device_id_get
(
MVEBU_CP_REGS_BASE
(
0
));
if
(
device_id
==
MVEBU_80X0_DEV_ID
||
device_id
==
MVEBU_80X0_CP115_DEV_ID
)
{
/* A8040/A8020 */
NOTICE
(
"SVC: DEV ID: %s, FREQ Mode: 0x%x
\n
"
,
single_cluster
==
0
?
"8040"
:
"8020"
,
freq_pidi_mode
);
switch
(
freq_pidi_mode
)
{
case
CPU_1800_DDR_1200_RCLK_1200
:
case
CPU_1800_DDR_1050_RCLK_1050
:
if
(
perr
[
1
])
goto
perror
;
avs_workpoint
=
svc
[
1
];
break
;
case
CPU_1600_DDR_1050_RCLK_1050
:
case
CPU_1600_DDR_900_RCLK_900_2
:
if
(
perr
[
2
])
goto
perror
;
avs_workpoint
=
svc
[
2
];
break
;
case
CPU_1300_DDR_800_RCLK_800
:
case
CPU_1300_DDR_650_RCLK_650
:
if
(
perr
[
3
])
goto
perror
;
avs_workpoint
=
svc
[
3
];
break
;
case
CPU_2000_DDR_1200_RCLK_1200
:
case
CPU_2000_DDR_1050_RCLK_1050
:
default:
if
(
perr
[
0
])
goto
perror
;
avs_workpoint
=
svc
[
0
];
break
;
}
}
else
if
(
device_id
==
MVEBU_70X0_DEV_ID
||
device_id
==
MVEBU_70X0_CP115_DEV_ID
)
{
/* A7040/A7020/A6040 */
NOTICE
(
"SVC: DEV ID: %s, FREQ Mode: 0x%x
\n
"
,
single_cluster
==
0
?
"7040"
:
"7020"
,
freq_pidi_mode
);
switch
(
freq_pidi_mode
)
{
case
CPU_1400_DDR_800_RCLK_800
:
if
(
single_cluster
)
{
/* 7020 */
if
(
perr
[
1
])
goto
perror
;
avs_workpoint
=
svc
[
1
];
}
else
{
if
(
perr
[
0
])
goto
perror
;
avs_workpoint
=
svc
[
0
];
}
break
;
case
CPU_1200_DDR_800_RCLK_800
:
if
(
single_cluster
)
{
/* 7020 */
if
(
perr
[
2
])
goto
perror
;
avs_workpoint
=
svc
[
2
];
}
else
{
if
(
perr
[
1
])
goto
perror
;
avs_workpoint
=
svc
[
1
];
}
break
;
case
CPU_800_DDR_800_RCLK_800
:
case
CPU_1000_DDR_800_RCLK_800
:
if
(
single_cluster
)
{
/* 7020 */
if
(
perr
[
3
])
goto
perror
;
avs_workpoint
=
svc
[
3
];
}
else
{
if
(
perr
[
2
])
goto
perror
;
avs_workpoint
=
svc
[
2
];
}
break
;
case
CPU_600_DDR_800_RCLK_800
:
if
(
perr
[
3
])
goto
perror
;
avs_workpoint
=
svc
[
3
];
/* Same for 6040 and 7020 */
break
;
case
CPU_1600_DDR_800_RCLK_800
:
/* 7020 only */
default:
if
(
single_cluster
)
{
/* 7020 */
if
(
perr
[
0
])
goto
perror
;
avs_workpoint
=
svc
[
0
];
}
else
avs_workpoint
=
0
;
break
;
}
}
else
{
ERROR
(
"SVC: Unsupported Device ID 0x%x
\n
"
,
device_id
);
return
;
}
/* Set AVS control if needed */
if
(
avs_workpoint
==
0
)
{
ERROR
(
"SVC: AVS work point not changed
\n
"
);
return
;
}
/* Remove parity bit */
avs_workpoint
&=
0x7F
;
reg_val
=
mmio_read_32
(
AVS_EN_CTRL_REG
);
NOTICE
(
"SVC: AVS work point changed from 0x%x to 0x%x
\n
"
,
(
reg_val
&
AVS_VDD_LOW_LIMIT_MASK
)
>>
AVS_LOW_VDD_LIMIT_OFFSET
,
avs_workpoint
);
reg_val
&=
~
(
AVS_VDD_LOW_LIMIT_MASK
|
AVS_VDD_HIGH_LIMIT_MASK
);
reg_val
|=
0x1
<<
AVS_ENABLE_OFFSET
;
reg_val
|=
avs_workpoint
<<
AVS_HIGH_VDD_LIMIT_OFFSET
;
reg_val
|=
avs_workpoint
<<
AVS_LOW_VDD_LIMIT_OFFSET
;
mmio_write_32
(
AVS_EN_CTRL_REG
,
reg_val
);
return
;
perror:
ERROR
(
"Failed SVC WP[%d] parity check!
\n
"
,
i
);
ERROR
(
"Ignoring the WP values
\n
"
);
}
#if PLAT_RECOVERY_IMAGE_ENABLE
/* I2C-based skip-image detection - not implemented on this platform */
static int ble_skip_image_i2c(struct skip_image *skip_im)
{
	ERROR("skipping image using i2c is not supported\n");
	/* not supported */
	return 0;
}
/* User-defined skip-image detection - boards must supply their own */
static int ble_skip_image_other(struct skip_image *skip_im)
{
	ERROR("implementation missing for skip image request\n");
	/* not supported, make your own implementation */
	return 0;
}
/* GPIO-based skip-image detection: sample the configured GPIO pin and,
 * when it matches the requested button state, latch the skip request in
 * the scratch-pad register and report it.
 * Returns 1 when the skip request was made, 0 otherwise.
 */
static int ble_skip_image_gpio(struct skip_image *skip_im)
{
	unsigned int val;
	unsigned int mpp_address = 0;
	unsigned int offset = 0;

	switch (skip_im->info.test.cp_ap) {
	case (CP):
		mpp_address = MVEBU_CP_GPIO_DATA_IN(
					skip_im->info.test.cp_index,
					skip_im->info.gpio.num);
		/* BUGFIX: use '>=' here. Each data-in register holds
		 * NUM_OF_GPIO_PER_REG (32) pins, so pin #32 belongs to the
		 * next register at bit 0. The original '>' left offset == 32
		 * for that pin, making the (1 << offset) below an undefined
		 * shift by the full width of int.
		 */
		if (skip_im->info.gpio.num >= NUM_OF_GPIO_PER_REG)
			offset = skip_im->info.gpio.num - NUM_OF_GPIO_PER_REG;
		else
			offset = skip_im->info.gpio.num;
		break;
	case (AP):
		mpp_address = MVEBU_AP_GPIO_DATA_IN;
		offset = skip_im->info.gpio.num;
		break;
	}

	val = mmio_read_32(mpp_address);
	val &= (1 << offset);
	if ((!val && skip_im->info.gpio.button_state == HIGH) ||
	    (val && skip_im->info.gpio.button_state == LOW)) {
		/* latch the request so the next boot round skips again */
		mmio_write_32(SCRATCH_PAD_REG2, SCRATCH_PAD_SKIP_VAL);
		return 1;
	}

	return 0;
}
/*
 * This function checks if there's a skip image request:
 * return values:
 * 1: (true) images request been made.
 * 0: (false) no image request been made.
 */
static int ble_skip_current_image(void)
{
	struct skip_image *skip_im;

	/* fetching skip image info */
	skip_im = (struct skip_image *)plat_marvell_get_skip_image_data();
	if (skip_im == NULL)
		return 0;

	/* check if skipping image request has already been made */
	if (mmio_read_32(SCRATCH_PAD_REG2) == SCRATCH_PAD_SKIP_VAL)
		return 0;

	switch (skip_im->detection_method) {
	case GPIO:
		return ble_skip_image_gpio(skip_im);
	case I2C:
		return ble_skip_image_i2c(skip_im);
	case USER_DEFINED:
		return ble_skip_image_other(skip_im);
	}

	return 0;
}
#endif
/* Switch the ap807 CPU clock source from ARO to PLL, then gate the ARO
 * clock driver (per the register defines above: SEL_PLL=1 selects PLL,
 * CLK_EN=1 disables the ARO clock).
 */
static void aro_to_pll(void)
{
	unsigned int ctrl;

	/* switch from ARO to PLL on both clusters */
	ctrl = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
	mmio_write_32(AP807_CPU_ARO_0_CTRL_0,
		      ctrl | AP807_CPU_ARO_SEL_PLL_MASK);

	ctrl = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
	mmio_write_32(AP807_CPU_ARO_1_CTRL_0,
		      ctrl | AP807_CPU_ARO_SEL_PLL_MASK);

	mdelay(1000);

	/* disable ARO clk driver on both clusters */
	ctrl = mmio_read_32(AP807_CPU_ARO_0_CTRL_0);
	mmio_write_32(AP807_CPU_ARO_0_CTRL_0,
		      ctrl | AP807_CPU_ARO_CLK_EN_MASK);

	ctrl = mmio_read_32(AP807_CPU_ARO_1_CTRL_0);
	mmio_write_32(AP807_CPU_ARO_1_CTRL_0,
		      ctrl | AP807_CPU_ARO_CLK_EN_MASK);
}
/* Platform BLE stage entry: bring up windows, AVS/SVC and DRAM.
 * Sets *skip to 1 (and returns 0) when a skip-to-recovery request was
 * detected; otherwise returns the DRAM init result.
 */
int ble_plat_setup(int *skip)
{
	int ret;

	/* Power down unused CPUs */
	plat_marvell_early_cpu_powerdown();

	/*
	 * Save the current CCU configuration and make required changes:
	 * - Allow access to DRAM larger than 4GB
	 * - Open memory access to all CPn peripherals
	 */
	ble_plat_mmap_config(MMAP_SAVE_AND_CONFIG);

#if PLAT_RECOVERY_IMAGE_ENABLE
	/* Check if there's a skip request to bootRom recovery Image */
	if (ble_skip_current_image()) {
		/* close memory access to all CPn peripherals. */
		ble_plat_mmap_config(MMAP_RESTORE_SAVED);
		*skip = 1;
		return 0;
	}
#endif
	/* Do required CP-110 setups for BLE stage */
	cp110_ble_init(MVEBU_CP_REGS_BASE(0));

	/* Setup AVS */
	ble_plat_svc_config();

	/* work with PLL clock driver in AP807 */
	if (ble_get_ap_type() == CHIP_ID_AP807)
		aro_to_pll();

	/* Do required AP setups for BLE stage */
	ap_ble_init();

	/* Update DRAM topology (scan DIMM SPDs) */
	plat_marvell_dram_update_topology();

	/* Kick it in */
	ret = dram_init();

	/* Restore the original CCU configuration before exit from BLE */
	ble_plat_mmap_config(MMAP_RESTORE_SAVED);

	return ret;
}
plat/marvell/a8k/common/plat_pm.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <a8k_common.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <cache_llc.h>
#include <console.h>
#include <gicv2.h>
#include <marvell_pm.h>
#include <mmio.h>
#include <mss_pm_ipc.h>
#include <plat_marvell.h>
#include <platform.h>
#include <plat_pm_trace.h>
#include <platform.h>
#define MVEBU_PRIVATE_UID_REG 0x30
#define MVEBU_RFU_GLOBL_SW_RST 0x84
#define MVEBU_CCU_RVBAR(cpu) (MVEBU_REGS_BASE + 0x640 + (cpu * 4))
#define MVEBU_CCU_CPU_UN_RESET(cpu) (MVEBU_REGS_BASE + 0x650 + (cpu * 4))
#define MPIDR_CPU_GET(mpidr) ((mpidr) & MPIDR_CPU_MASK)
#define MPIDR_CLUSTER_GET(mpidr) MPIDR_AFFLVL1_VAL((mpidr))
#define MVEBU_GPIO_MASK(index) (1 << (index % 32))
#define MVEBU_MPP_MASK(index) (0xF << (4 * (index % 8)))
#define MVEBU_GPIO_VALUE(index, value) (value << (index % 32))
#define MVEBU_USER_CMD_0_REG (MVEBU_DRAM_MAC_BASE + 0x20)
#define MVEBU_USER_CMD_CH0_OFFSET 28
#define MVEBU_USER_CMD_CH0_MASK (1 << MVEBU_USER_CMD_CH0_OFFSET)
#define MVEBU_USER_CMD_CH0_EN (1 << MVEBU_USER_CMD_CH0_OFFSET)
#define MVEBU_USER_CMD_CS_OFFSET 24
#define MVEBU_USER_CMD_CS_MASK (0xF << MVEBU_USER_CMD_CS_OFFSET)
#define MVEBU_USER_CMD_CS_ALL (0xF << MVEBU_USER_CMD_CS_OFFSET)
#define MVEBU_USER_CMD_SR_OFFSET 6
#define MVEBU_USER_CMD_SR_MASK (0x3 << MVEBU_USER_CMD_SR_OFFSET)
#define MVEBU_USER_CMD_SR_ENTER (0x1 << MVEBU_USER_CMD_SR_OFFSET)
#define MVEBU_MC_PWR_CTRL_REG (MVEBU_DRAM_MAC_BASE + 0x54)
#define MVEBU_MC_AC_ON_DLY_OFFSET 8
#define MVEBU_MC_AC_ON_DLY_MASK (0xF << MVEBU_MC_AC_ON_DLY_OFFSET)
#define MVEBU_MC_AC_ON_DLY_DEF_VAR (8 << MVEBU_MC_AC_ON_DLY_OFFSET)
#define MVEBU_MC_AC_OFF_DLY_OFFSET 4
#define MVEBU_MC_AC_OFF_DLY_MASK (0xF << MVEBU_MC_AC_OFF_DLY_OFFSET)
#define MVEBU_MC_AC_OFF_DLY_DEF_VAR (0xC << MVEBU_MC_AC_OFF_DLY_OFFSET)
#define MVEBU_MC_PHY_AUTO_OFF_OFFSET 0
#define MVEBU_MC_PHY_AUTO_OFF_MASK (1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
#define MVEBU_MC_PHY_AUTO_OFF_EN (1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
/* this lock synchronize AP multiple cores execution with MSS */
DEFINE_BAKERY_LOCK
(
pm_sys_lock
);
/* Weak definitions may be overridden in specific board */
#pragma weak plat_marvell_get_pm_cfg
/* AP806 CPU power down / power up definitions: per-core identifiers */
enum CPU_ID {
	CPU0,
	CPU1,
	CPU2,
	CPU3
};
#define REG_WR_VALIDATE_TIMEOUT (2000)
#define FEATURE_DISABLE_STATUS_REG \
(MVEBU_REGS_BASE + 0x6F8230)
#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET 4
#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK \
(0x1 << FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET)
#ifdef MVEBU_SOC_AP807
#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET 1
#define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET 0
#else
#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET 0
#define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET 31
#endif
#define PWRC_CPUN_CR_REG(cpu_id) \
(MVEBU_REGS_BASE + 0x680000 + (cpu_id * 0x10))
#define PWRC_CPUN_CR_PWR_DN_RQ_MASK \
(0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET)
#define PWRC_CPUN_CR_ISO_ENABLE_OFFSET 16
#define PWRC_CPUN_CR_ISO_ENABLE_MASK \
(0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)
#define PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK \
(0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)
#define CCU_B_PRCRN_REG(cpu_id) \
(MVEBU_REGS_BASE + 0x1A50 + \
((cpu_id / 2) * (0x400)) + ((cpu_id % 2) * 4))
#define CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET 0
#define CCU_B_PRCRN_CPUPORESET_STATIC_MASK \
(0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)
/* power switch fingers */
#define AP807_PWRC_LDO_CR0_REG \
(MVEBU_REGS_BASE + 0x680000 + 0x100)
#define AP807_PWRC_LDO_CR0_OFFSET 16
#define AP807_PWRC_LDO_CR0_MASK \
(0xff << AP807_PWRC_LDO_CR0_OFFSET)
#define AP807_PWRC_LDO_CR0_VAL 0xfd
/*
 * Power down CPU:
 * Used to reduce power consumption, and avoid SoC unnecessary temperature rise.
 *
 * The hardware sequence is: isolate the core, drop its power, de-assert the
 * LDO-bypass-ready indication, then hold the core in POR reset. Each register
 * write is polled back (bounded by REG_WR_VALIDATE_TIMEOUT iterations).
 *
 * cpu_id - absolute CPU index (0..3)
 * Returns 0 on success, -1 if a polled step did not complete in time.
 */
static int plat_marvell_cpu_powerdown(int cpu_id)
{
	uint32_t	reg_val;
	int		exit_loop = REG_WR_VALIDATE_TIMEOUT;

	INFO("Powering down CPU%d\n", cpu_id);

	/* 1. Isolation enable */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val |= 0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 2. Read and check Isolation enabled - verify bit set to 1 */
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while (!(reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
		 exit_loop > 0);

	/* 3. Switch off CPU power (power-down request bit is active-low) */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val &= ~PWRC_CPUN_CR_PWR_DN_RQ_MASK;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 4. Read and check Switch Off - verify bit set to 0 */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while (reg_val & PWRC_CPUN_CR_PWR_DN_RQ_MASK && exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweroff_error;

	/* 5. De-Assert power ready */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val &= ~PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 6. Assert CPU POR reset (active-low - clearing the bit asserts it) */
	reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
	reg_val &= ~CCU_B_PRCRN_CPUPORESET_STATIC_MASK;
	mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);

	/* 7. Read and poll until the reset assertion is reflected back */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
		exit_loop--;
	} while (reg_val & CCU_B_PRCRN_CPUPORESET_STATIC_MASK &&
		 exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweroff_error;

	INFO("Successfully powered down CPU%d\n", cpu_id);

	return 0;

cpu_poweroff_error:
	ERROR("ERROR: Can't power down CPU%d\n", cpu_id);
	return -1;
}
/*
 * Power down CPUs 1-3 at early boot stage,
 * to reduce power consumption and SoC temperature.
 * This is triggered by BLE prior to DDR initialization.
 *
 * Note:
 * All CPUs will be powered up by plat_marvell_cpu_powerup on Linux boot stage,
 * which is triggered by PSCI ops (pwr_domain_on).
 *
 * Returns 0 on success, -1 if any per-CPU power-down failed.
 */
int plat_marvell_early_cpu_powerdown(void)
{
	uint32_t single_cluster;
	int cluster_num;

	/* A set fuse bit indicates a dual-core (single cluster) part */
	single_cluster = mmio_read_32(FEATURE_DISABLE_STATUS_REG) &
			 FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK;
	cluster_num = single_cluster ? 1 : 2;

	INFO("Powering off unused CPUs\n");

	/* Cluster-0 always exists, so CPU1 can be powered down right away */
	if (plat_marvell_cpu_powerdown(CPU1) == -1)
		return -1;

	/*
	 * CPU2/CPU3 live in the second cluster, which is absent on
	 * dual-core systems - only touch them on quad-core parts.
	 */
	if (cluster_num == 2) {
		if (plat_marvell_cpu_powerdown(CPU2) == -1)
			return -1;
		if (plat_marvell_cpu_powerdown(CPU3) == -1)
			return -1;
	}

	return 0;
}
/*
 * Power up CPU - part of Linux boot stage
 *
 * Reverses plat_marvell_cpu_powerdown: switch the core's power on, assert
 * power-ready, drop isolation, then release the POR reset. Polled steps are
 * bounded by REG_WR_VALIDATE_TIMEOUT.
 *
 * mpidr - MPIDR of the core to power up (cluster in Aff1, CPU in Aff0)
 * Returns 0 on success, -1 if a polled step did not complete in time.
 */
static int plat_marvell_cpu_powerup(u_register_t mpidr)
{
	uint32_t	reg_val;
	int	cpu_id = MPIDR_CPU_GET(mpidr),
		cluster = MPIDR_CLUSTER_GET(mpidr);
	int	exit_loop = REG_WR_VALIDATE_TIMEOUT;

	/* calculate absolute CPU ID */
	cpu_id = cluster * PLAT_MARVELL_CLUSTER_CORE_COUNT + cpu_id;

	INFO("Powering on CPU%d\n", cpu_id);

#ifdef MVEBU_SOC_AP807
	/* Activate 2 power switch fingers */
	reg_val = mmio_read_32(AP807_PWRC_LDO_CR0_REG);
	reg_val &= ~(AP807_PWRC_LDO_CR0_MASK);
	reg_val |= (AP807_PWRC_LDO_CR0_VAL << AP807_PWRC_LDO_CR0_OFFSET);
	mmio_write_32(AP807_PWRC_LDO_CR0_REG, reg_val);
	udelay(100);
#endif

	/* 1. Switch CPU power ON */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val |= 0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 2. Wait for CPU on, up to 100 uSec: */
	udelay(100);

	/* 3. Assert power ready */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val |= 0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 4. Read & Validate power ready
	 * used in order to generate 16 Host CPU cycles
	 */
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while (!(reg_val & (0x1 << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)) &&
		 exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweron_error;

	/* 5. Isolation disable */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val &= ~PWRC_CPUN_CR_ISO_ENABLE_MASK;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 6. Read back until the isolation bit is observed cleared */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while ((reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
		 exit_loop > 0);

	/* 7. De Assert CPU POR reset & Core reset */
	reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
	reg_val |= 0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET;
	mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);

	/* 8. Read & Validate CPU POR reset */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
		exit_loop--;
	} while (!(reg_val & (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)) &&
		 exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweron_error;

	INFO("Successfully powered on CPU%d\n", cpu_id);

	return 0;

cpu_poweron_error:
	ERROR("ERROR: Can't power up CPU%d\n", cpu_id);
	return -1;
}
/*
 * Release a secondary CPU into execution (non-SCP flow): program its entry
 * address into the CCU RVBAR and take it out of reset.
 *
 * mpidr - MPIDR of the target core
 * Always returns 0.
 */
static int plat_marvell_cpu_on(u_register_t mpidr)
{
	int cpu_id;
	int cluster;

	/* Set memory barrier before touching the power registers */
	dsbsy();

	/* Get cpu number - use CPU ID */
	cpu_id = MPIDR_CPU_GET(mpidr);

	/* Get cluster number - use affinity level 1 */
	cluster = MPIDR_CLUSTER_GET(mpidr);

	/* Set CPU private UID */
	mmio_write_32(MVEBU_REGS_BASE + MVEBU_PRIVATE_UID_REG, cluster + 0x4);

	/* Set the cpu start address to BL1 entry point (align to 0x10000) */
	mmio_write_32(MVEBU_CCU_RVBAR(cpu_id),
		      PLAT_MARVELL_CPU_ENTRY_ADDR >> 16);

	/* Get the cpu out of reset */
	mmio_write_32(MVEBU_CCU_CPU_UN_RESET(cpu_id), 0x10001);

	return 0;
}
/*****************************************************************************
 * A8K handler called to check the validity of the power state
 * parameter and to fill in the requested per-level domain states.
 *****************************************************************************
 */
static int a8k_validate_power_state(unsigned int power_state,
			psci_power_state_t *req_state)
{
	int state_type = psci_get_pstate_type(power_state);
	int target_lvl = psci_get_pstate_pwrlvl(power_state);
	int lvl;

	/* Reject power levels beyond what the platform implements */
	if (target_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	if (state_type == PSTATE_TYPE_STANDBY) {
		/*
		 * Standby is only possible at power level 0;
		 * reject it for any other level.
		 */
		if (target_lvl != MARVELL_PWR_LVL0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MARVELL_PWR_LVL0] =
					MARVELL_LOCAL_STATE_RET;
	} else {
		/* Power-down: mark every level up to the target as OFF */
		for (lvl = MARVELL_PWR_LVL0; lvl <= target_lvl; lvl++)
			req_state->pwr_domain_state[lvl] =
					MARVELL_LOCAL_STATE_OFF;
	}

	/* The platform only accepts a zero 'state id' */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
/*****************************************************************************
 * A8K handler called when a CPU is about to enter standby.
 * Not implemented on this platform - logs an error and panics.
 *****************************************************************************
 */
static void a8k_cpu_standby(plat_local_state_t cpu_state)
{
	ERROR("%s: needs to be implemented\n", __func__);
	panic();
}
/*****************************************************************************
 * A8K handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 *
 * With SCP firmware present the CPU ON request is delegated to the MSS via
 * an IPC message (serialized by pm_sys_lock); otherwise the CPU is released
 * directly through the CCU registers.
 *****************************************************************************
 */
static int a8k_pwr_domain_on(u_register_t mpidr)
{
	/* Power up CPU (CPUs 1-3 are powered off at start of BLE) */
	plat_marvell_cpu_powerup(mpidr);

	if (is_pm_fw_running()) {
		/* Flatten MPIDR to a linear core index: cpu + cluster * 2 */
		unsigned int target =
			((mpidr & 0xFF) + (((mpidr >> 8) & 0xFF) * 2));

		/*
		 * pm system synchronization - used to synchronize
		 * multiple core access to MSS
		 */
		bakery_lock_get(&pm_sys_lock);

		/* send CPU ON IPC Message to MSS */
		mss_pm_ipc_msg_send(target, PM_IPC_MSG_CPU_ON, 0);

		/* trigger IPC message to MSS */
		mss_pm_ipc_msg_trigger();

		/* pm system synchronization */
		bakery_lock_release(&pm_sys_lock);

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_ON | target);
	} else {
		/* proprietary CPU ON execution flow */
		plat_marvell_cpu_on(mpidr);
	}

	return 0;
}
/*****************************************************************************
 * A8K handler called to validate the entry point.
 * All non-secure entry points are accepted on this platform.
 *****************************************************************************
 */
static int a8k_validate_ns_entrypoint(uintptr_t entrypoint)
{
	return PSCI_E_SUCCESS;
}
/*****************************************************************************
 * A8K handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 *
 * Requires SCP firmware: the CPU OFF request is sent to the MSS over IPC.
 * Without SCP the request is only logged and ignored.
 *****************************************************************************
 */
static void a8k_pwr_domain_off(const psci_power_state_t *target_state)
{
	if (is_pm_fw_running()) {
		unsigned int idx = plat_my_core_pos();

		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();

		/* pm system synchronization - used to synchronize multiple
		 * core access to MSS
		 */
		bakery_lock_get(&pm_sys_lock);

		/* send CPU OFF IPC Message to MSS */
		mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_OFF, target_state);

		/* trigger IPC message to MSS */
		mss_pm_ipc_msg_trigger();

		/* pm system synchronization */
		bakery_lock_release(&pm_sys_lock);

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_OFF);
	} else {
		INFO("%s: is not supported without SCP\n", __func__);
	}
}
/* Get PM config to power off the SoC.
 * Weak default (see the #pragma weak above): boards that support GPIO-based
 * power-off override this to return their power_off_method descriptor.
 */
void *plat_marvell_get_pm_cfg(void)
{
	return NULL;
}
/*
 * This function should be called on restore from
 * "suspend to RAM" state when the execution flow
 * has to bypass BootROM image to RAM copy and speed up
 * the system recovery
 *
 * Thin wrapper so the mailbox can hold a plain no-argument function pointer.
 */
static void plat_marvell_exit_bootrom(void)
{
	marvell_exit_bootrom(PLAT_MARVELL_TRUSTED_ROM_BASE);
}
/*
 * Prepare for the power off of the system via GPIO
 *
 * Configures the PMIC-connected pins as GPIO outputs, then walks the
 * board-defined toggle sequence. Every step except the last is written to
 * the GPIO data register immediately; the final step's register address and
 * value are returned through gpio_addr/gpio_data so the caller can issue
 * them after DDR self-refresh is enabled (no DRAM access allowed then).
 *
 * pm_cfg    - board power-off descriptor (GPIO pin list + toggle sequence)
 * gpio_addr - out: GPIO data register address for the final step
 * gpio_data - out: GPIO data register value for the final step
 */
static void plat_marvell_power_off_gpio(struct power_off_method *pm_cfg,
					register_t *gpio_addr,
					register_t *gpio_data)
{
	unsigned int gpio;
	unsigned int idx;
	unsigned int shift;
	unsigned int reg;
	unsigned int addr;
	gpio_info_t *info;
	unsigned int tog_bits;

	assert((pm_cfg->cfg.gpio.pin_count < PMIC_GPIO_MAX_NUMBER) &&
	       (pm_cfg->cfg.gpio.step_count < PMIC_GPIO_MAX_TOGGLE_STEP));

	/* Prepare GPIOs for PMIC */
	for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
		info = &pm_cfg->cfg.gpio.info[gpio];

		/* Set PMIC GPIO to output mode */
		reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT_EN(
				   info->cp_index, info->gpio_index));
		mmio_write_32(MVEBU_CP_GPIO_DATA_OUT_EN(
			      info->cp_index, info->gpio_index),
			      reg & ~MVEBU_GPIO_MASK(info->gpio_index));

		/* Set the appropriate MPP to GPIO mode */
		reg = mmio_read_32(MVEBU_PM_MPP_REGS(info->cp_index,
						     info->gpio_index));
		mmio_write_32(MVEBU_PM_MPP_REGS(info->cp_index,
						info->gpio_index),
			      reg & ~MVEBU_MPP_MASK(info->gpio_index));
	}

	/* Wait for MPP & GPIO pre-configurations done */
	mdelay(pm_cfg->cfg.gpio.delay_ms);

	/* Toggle the GPIO values, and leave final step to be triggered
	 * after DDR self-refresh is enabled
	 */
	for (idx = 0; idx < pm_cfg->cfg.gpio.step_count; idx++) {
		tog_bits = pm_cfg->cfg.gpio.seq[idx];

		/* The GPIOs must be within same GPIO register,
		 * thus could get the original value by first GPIO
		 */
		info = &pm_cfg->cfg.gpio.info[0];
		reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT(
				   info->cp_index, info->gpio_index));
		addr = MVEBU_CP_GPIO_DATA_OUT(info->cp_index,
					      info->gpio_index);

		/* Apply this step's bit pattern: bit N of tog_bits drives
		 * the Nth configured pin high (set) or low (clear)
		 */
		for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
			shift = pm_cfg->cfg.gpio.info[gpio].gpio_index % 32;
			if (GPIO_LOW == (tog_bits & (1 << gpio)))
				reg &= ~(1 << shift);
			else
				reg |= (1 << shift);
		}

		/* Set the GPIO register, for last step just store
		 * register address and values to system registers
		 */
		if (idx < pm_cfg->cfg.gpio.step_count - 1) {
			mmio_write_32(MVEBU_CP_GPIO_DATA_OUT(
				      info->cp_index, info->gpio_index),
				      reg);
			mdelay(pm_cfg->cfg.gpio.delay_ms);
		} else {
			/* Save GPIO register and address values for
			 * finishing the power down operation later
			 */
			*gpio_addr = addr;
			*gpio_data = reg;
		}
	}
}
/*
 * Prepare for the power off of the system
 *
 * Dispatches on the board-selected power-off method; currently only the
 * PMIC-over-GPIO method is implemented, any other type is a no-op.
 */
static void plat_marvell_power_off_prepare(struct power_off_method *pm_cfg,
					   register_t *addr, register_t *data)
{
	if (pm_cfg->type == PMIC_GPIO)
		plat_marvell_power_off_gpio(pm_cfg, addr, data);
}
/*****************************************************************************
 * A8K handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 *
 * With SCP firmware the suspend request goes to the MSS over IPC. Without
 * SCP, a suspend-to-RAM marker and the BootROM bypass address are placed in
 * the shared mailbox, the LLC is flushed and disabled, and the function
 * returns so PSCI can complete context saving.
 *****************************************************************************
 */
static void a8k_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	if (is_pm_fw_running()) {
		unsigned int idx;

		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();

		idx = plat_my_core_pos();

		/* pm system synchronization - used to synchronize multiple
		 * core access to MSS
		 */
		bakery_lock_get(&pm_sys_lock);

		/* send CPU Suspend IPC Message to MSS */
		mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_SUSPEND, target_state);

		/* trigger IPC message to MSS */
		mss_pm_ipc_msg_trigger();

		/* pm system synchronization */
		bakery_lock_release(&pm_sys_lock);

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND);
	} else {
		uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

		INFO("Suspending to RAM\n");

		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();

		/* Mark the resume path for the next (warm) boot */
		mailbox[MBOX_IDX_SUSPEND_MAGIC] = MVEBU_MAILBOX_SUSPEND_STATE;
		mailbox[MBOX_IDX_ROM_EXIT_ADDR] =
			(uintptr_t)&plat_marvell_exit_bootrom;

#if PLAT_MARVELL_SHARED_RAM_CACHED
		flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
				   MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
				   2 * sizeof(uintptr_t));
#endif
		/* Flush and disable LLC before going off-power */
		llc_disable(0);

		isb();
		/*
		 * Do not halt here!
		 * The function must return for allowing the caller function
		 * psci_power_up_finish() to do the proper context saving and
		 * to release the CPU lock.
		 */
	}
}
/*****************************************************************************
 * A8K handler called when a power domain has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from.
 *
 * Re-initializes the architectural state and the per-CPU GIC interface.
 *****************************************************************************
 */
static void a8k_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* arch specific configuration */
	marvell_psci_arch_init(0);

	/* Interrupt initialization */
	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();

	if (is_pm_fw_running()) {
		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_ON_FINISH);
	}
}
/*****************************************************************************
 * A8K handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 *
 * Without SCP this is the resume-from-RAM path: the primary CPU redoes
 * console/platform setup and clears the suspend marker from the mailbox.
 *****************************************************************************
 */
static void a8k_pwr_domain_suspend_finish(
					const psci_power_state_t *target_state)
{
	if (is_pm_fw_running()) {
		/* arch specific configuration */
		marvell_psci_arch_init(0);

		/* Interrupt initialization */
		gicv2_cpuif_enable();

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND_FINISH);
	} else {
		uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

		/* Only primary CPU requires platform init */
		if (!plat_my_core_pos()) {
			/* Initialize the console to provide
			 * early debug support
			 */
			console_init(PLAT_MARVELL_BOOT_UART_BASE,
				     PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
				     MARVELL_CONSOLE_BAUDRATE);

			bl31_plat_arch_setup();
			marvell_bl31_platform_setup();

			/*
			 * Remove suspend to RAM marker from the mailbox
			 * for treating a regular reset as a cold boot
			 */
			mailbox[MBOX_IDX_SUSPEND_MAGIC] = 0;
			mailbox[MBOX_IDX_ROM_EXIT_ADDR] = 0;

#if PLAT_MARVELL_SHARED_RAM_CACHED
			flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
					   MBOX_IDX_SUSPEND_MAGIC *
					   sizeof(uintptr_t),
					   2 * sizeof(uintptr_t));
#endif
		}
	}
}
/*****************************************************************************
 * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
 * call to get the `power_state` parameter. This allows the platform to encode
 * the appropriate State-ID field within the `power_state` parameter which can
 * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
 *****************************************************************************
 */
static void a8k_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	int lvl;

	/* Request the deepest OFF state for every affinity level */
	for (lvl = MPIDR_AFFLVL0; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		req_state->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
/*
 * Final power-down step. With SCP firmware this is a plain PSCI WFI power
 * down. Without SCP: prepare the board power-off GPIOs, arm DDR self-refresh,
 * then - from a register-only inline asm sequence (any DRAM access would
 * break self-refresh) - enter self-refresh, trigger the PMIC power-off GPIO
 * write and trap the core. Never returns.
 */
static void __dead2 a8k_pwr_domain_pwr_down_wfi(
					const psci_power_state_t *target_state)
{
	struct power_off_method *pm_cfg;
	unsigned int srcmd;
	unsigned int sdram_reg;
	register_t gpio_data = 0, gpio_addr = 0;

	if (is_pm_fw_running()) {
		psci_power_down_wfi();
		panic();
	}

	pm_cfg = (struct power_off_method *)plat_marvell_get_pm_cfg();

	/* Prepare for power off */
	plat_marvell_power_off_prepare(pm_cfg, &gpio_addr, &gpio_data);

	/* First step to enable DDR self-refresh
	 * to keep the data during suspend
	 */
	mmio_write_32(MVEBU_MC_PWR_CTRL_REG, 0x8C1);

	/* Save DDR self-refresh second step register
	 * and value to be issued later
	 */
	sdram_reg = MVEBU_USER_CMD_0_REG;
	srcmd = mmio_read_32(sdram_reg);
	srcmd &= ~(MVEBU_USER_CMD_CH0_MASK | MVEBU_USER_CMD_CS_MASK |
		   MVEBU_USER_CMD_SR_MASK);
	srcmd |= (MVEBU_USER_CMD_CH0_EN | MVEBU_USER_CMD_CS_ALL |
		  MVEBU_USER_CMD_SR_ENTER);

	/*
	 * Wait for DRAM is done using registers access only.
	 * At this stage any access to DRAM (procedure call) will
	 * release it from the self-refresh mode
	 */
	__asm__ volatile (
		/* Align to a cache line */
		" .balign 64\n\t"

		/* Enter self refresh */
		" str %[srcmd], [%[sdram_reg]]\n\t"

		/*
		 * Wait 100 cycles for DDR to enter self refresh, by
		 * doing 50 times two instructions.
		 */
		" mov x1, #50\n\t"
		"1: subs x1, x1, #1\n\t"
		" bne 1b\n\t"

		/* Issue the command to trigger the SoC power off */
		" str %[gpio_data], [%[gpio_addr]]\n\t"

		/* Trap the processor */
		" b .\n\t"
		: :
		[srcmd] "r" (srcmd),
		[sdram_reg] "r" (sdram_reg),
		[gpio_addr] "r" (gpio_addr),
		[gpio_data] "r" (gpio_data)
		: "x1");

	panic();
}
/*****************************************************************************
 * A8K handlers to shutdown/reboot the system
 *****************************************************************************
 */

/* SYSTEM_OFF is not implemented on this platform - log and panic */
static void __dead2 a8k_system_off(void)
{
	ERROR("%s: needs to be implemented\n", __func__);
	panic();
}
/* Trigger a global SoC warm reset through the RFU software reset register */
void plat_marvell_system_reset(void)
{
	mmio_write_32(MVEBU_RFU_BASE + MVEBU_RFU_GLOBL_SW_RST, 0x0);
}
/* PSCI SYSTEM_RESET handler - issues the global reset and never returns */
static void __dead2 a8k_system_reset(void)
{
	plat_marvell_system_reset();

	/* we shouldn't get to this point */
	panic();
}
/*****************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform layer will take care of registering the handlers with PSCI.
 *****************************************************************************
 */
const plat_psci_ops_t plat_arm_psci_pm_ops = {
	.cpu_standby = a8k_cpu_standby,
	.pwr_domain_on = a8k_pwr_domain_on,
	.pwr_domain_off = a8k_pwr_domain_off,
	.pwr_domain_suspend = a8k_pwr_domain_suspend,
	.pwr_domain_on_finish = a8k_pwr_domain_on_finish,
	.get_sys_suspend_power_state = a8k_get_sys_suspend_power_state,
	.pwr_domain_suspend_finish = a8k_pwr_domain_suspend_finish,
	.pwr_domain_pwr_down_wfi = a8k_pwr_domain_pwr_down_wfi,
	.system_off = a8k_system_off,
	.system_reset = a8k_system_reset,
	.validate_power_state = a8k_validate_power_state,
	.validate_ns_entrypoint = a8k_validate_ns_entrypoint
};
plat/marvell/a8k/common/plat_pm_trace.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <mmio.h>
#include <mss_mem.h>
#include <platform.h>
#include <plat_pm_trace.h>
#ifdef PM_TRACE_ENABLE
/* core trace APIs - per-core dispatch table indexed by core position */
core_trace_func funcTbl[PLATFORM_CORE_COUNT] = {
	pm_core_0_trace,
	pm_core_1_trace,
	pm_core_2_trace,
	pm_core_3_trace};
/*****************************************************************************
* pm_core0_trace
* pm_core1_trace
* pm_core2_trace
* pm_core_3trace
*
* This functions set trace info into core cyclic trace queue in MSS SRAM
* memory space
*****************************************************************************
*/
void
pm_core_0_trace
(
unsigned
int
trace
)
{
unsigned
int
current_position_core_0
=
mmio_read_32
(
AP_MSS_ATF_CORE_0_CTRL_BASE
);
mmio_write_32
((
AP_MSS_ATF_CORE_0_INFO_BASE
+
(
current_position_core_0
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
mmio_read_32
(
AP_MSS_TIMER_BASE
));
mmio_write_32
((
AP_MSS_ATF_CORE_0_INFO_TRACE
+
(
current_position_core_0
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
trace
);
mmio_write_32
(
AP_MSS_ATF_CORE_0_CTRL_BASE
,
((
current_position_core_0
+
1
)
&
AP_MSS_ATF_TRACE_SIZE_MASK
));
}
void
pm_core_1_trace
(
unsigned
int
trace
)
{
unsigned
int
current_position_core_1
=
mmio_read_32
(
AP_MSS_ATF_CORE_1_CTRL_BASE
);
mmio_write_32
((
AP_MSS_ATF_CORE_1_INFO_BASE
+
(
current_position_core_1
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
mmio_read_32
(
AP_MSS_TIMER_BASE
));
mmio_write_32
((
AP_MSS_ATF_CORE_1_INFO_TRACE
+
(
current_position_core_1
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
trace
);
mmio_write_32
(
AP_MSS_ATF_CORE_1_CTRL_BASE
,
((
current_position_core_1
+
1
)
&
AP_MSS_ATF_TRACE_SIZE_MASK
));
}
void
pm_core_2_trace
(
unsigned
int
trace
)
{
unsigned
int
current_position_core_2
=
mmio_read_32
(
AP_MSS_ATF_CORE_2_CTRL_BASE
);
mmio_write_32
((
AP_MSS_ATF_CORE_2_INFO_BASE
+
(
current_position_core_2
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
mmio_read_32
(
AP_MSS_TIMER_BASE
));
mmio_write_32
((
AP_MSS_ATF_CORE_2_INFO_TRACE
+
(
current_position_core_2
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
trace
);
mmio_write_32
(
AP_MSS_ATF_CORE_2_CTRL_BASE
,
((
current_position_core_2
+
1
)
&
AP_MSS_ATF_TRACE_SIZE_MASK
));
}
void
pm_core_3_trace
(
unsigned
int
trace
)
{
unsigned
int
current_position_core_3
=
mmio_read_32
(
AP_MSS_ATF_CORE_3_CTRL_BASE
);
mmio_write_32
((
AP_MSS_ATF_CORE_3_INFO_BASE
+
(
current_position_core_3
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
mmio_read_32
(
AP_MSS_TIMER_BASE
));
mmio_write_32
((
AP_MSS_ATF_CORE_3_INFO_TRACE
+
(
current_position_core_3
*
AP_MSS_ATF_CORE_ENTRY_SIZE
)),
trace
);
mmio_write_32
(
AP_MSS_ATF_CORE_3_CTRL_BASE
,
((
current_position_core_3
+
1
)
&
AP_MSS_ATF_TRACE_SIZE_MASK
));
}
#endif
/* PM_TRACE_ENABLE */
plat/marvell/a8k/common/plat_thermal.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <delay_timer.h>
#include <mmio.h>
#include <mvebu_def.h>
#include <thermal.h>
/* Max polls (100 us apiece) while waiting for a valid sensor reading */
#define THERMAL_TIMEOUT					1200

/* ext_tsen_ctrl_lsb bits: start, reset and enable of the sensor */
#define THERMAL_SEN_CTRL_LSB_STRT_OFFSET		0
#define THERMAL_SEN_CTRL_LSB_STRT_MASK			\
				(0x1 << THERMAL_SEN_CTRL_LSB_STRT_OFFSET)
#define THERMAL_SEN_CTRL_LSB_RST_OFFSET			1
#define THERMAL_SEN_CTRL_LSB_RST_MASK			\
				(0x1 << THERMAL_SEN_CTRL_LSB_RST_OFFSET)
#define THERMAL_SEN_CTRL_LSB_EN_OFFSET			2
#define THERMAL_SEN_CTRL_LSB_EN_MASK			\
				(0x1 << THERMAL_SEN_CTRL_LSB_EN_OFFSET)

/* ext_tsen_status bits: reading-valid flag and the 10-bit raw sample */
#define THERMAL_SEN_CTRL_STATS_VALID_OFFSET		16
#define THERMAL_SEN_CTRL_STATS_VALID_MASK		\
				(0x1 << THERMAL_SEN_CTRL_STATS_VALID_OFFSET)
#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET		0
#define THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK		\
			(0x3FF << THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET)

/* 10-bit 2's-complement output: sign threshold and wrap constant */
#define THERMAL_SEN_OUTPUT_MSB				512
#define THERMAL_SEN_OUTPUT_COMP				1024

/* External thermal sensor register layout */
struct tsen_regs {
	uint32_t ext_tsen_ctrl_lsb;	/* control: start/reset/enable */
	uint32_t ext_tsen_ctrl_msb;
	uint32_t ext_tsen_status;	/* valid flag + raw temperature */
};
static
int
ext_tsen_probe
(
struct
tsen_config
*
tsen_cfg
)
{
uint32_t
reg
,
timeout
=
0
;
struct
tsen_regs
*
base
;
if
(
tsen_cfg
==
NULL
&&
tsen_cfg
->
regs_base
==
NULL
)
{
ERROR
(
"initial thermal sensor configuration is missing
\n
"
);
return
-
1
;
}
base
=
(
struct
tsen_regs
*
)
tsen_cfg
->
regs_base
;
INFO
(
"initializing thermal sensor
\n
"
);
/* initialize thermal sensor hardware reset once */
reg
=
mmio_read_32
((
uintptr_t
)
&
base
->
ext_tsen_ctrl_lsb
);
reg
&=
~
THERMAL_SEN_CTRL_LSB_RST_OFFSET
;
/* de-assert TSEN_RESET */
reg
|=
THERMAL_SEN_CTRL_LSB_EN_MASK
;
/* set TSEN_EN to 1 */
reg
|=
THERMAL_SEN_CTRL_LSB_STRT_MASK
;
/* set TSEN_START to 1 */
mmio_write_32
((
uintptr_t
)
&
base
->
ext_tsen_ctrl_lsb
,
reg
);
reg
=
mmio_read_32
((
uintptr_t
)
&
base
->
ext_tsen_status
);
while
((
reg
&
THERMAL_SEN_CTRL_STATS_VALID_MASK
)
==
0
&&
timeout
<
THERMAL_TIMEOUT
)
{
udelay
(
100
);
reg
=
mmio_read_32
((
uintptr_t
)
&
base
->
ext_tsen_status
);
timeout
++
;
}
if
((
reg
&
THERMAL_SEN_CTRL_STATS_VALID_MASK
)
==
0
)
{
ERROR
(
"thermal sensor is not ready
\n
"
);
return
-
1
;
}
tsen_cfg
->
tsen_ready
=
1
;
VERBOSE
(
"thermal sensor was initialized
\n
"
);
return
0
;
}
static
int
ext_tsen_read
(
struct
tsen_config
*
tsen_cfg
,
int
*
temp
)
{
uint32_t
reg
;
struct
tsen_regs
*
base
;
if
(
tsen_cfg
==
NULL
&&
!
tsen_cfg
->
tsen_ready
)
{
ERROR
(
"thermal sensor was not initialized
\n
"
);
return
-
1
;
}
base
=
(
struct
tsen_regs
*
)
tsen_cfg
->
regs_base
;
reg
=
mmio_read_32
((
uintptr_t
)
&
base
->
ext_tsen_status
);
reg
=
((
reg
&
THERMAL_SEN_CTRL_STATS_TEMP_OUT_MASK
)
>>
THERMAL_SEN_CTRL_STATS_TEMP_OUT_OFFSET
);
/*
* TSEN output format is signed as a 2s complement number
* ranging from-512 to +511. when MSB is set, need to
* calculate the complement number
*/
if
(
reg
>=
THERMAL_SEN_OUTPUT_MSB
)
reg
-=
THERMAL_SEN_OUTPUT_COMP
;
if
(
tsen_cfg
->
tsen_divisor
==
0
)
{
ERROR
(
"thermal sensor divisor cannot be zero
\n
"
);
return
-
1
;
}
*
temp
=
((
tsen_cfg
->
tsen_gain
*
((
int
)
reg
))
+
tsen_cfg
->
tsen_offset
)
/
tsen_cfg
->
tsen_divisor
;
return
0
;
}
/* AP external sensor instance with its calibration constants:
 * temperature = (tsen_gain * raw + tsen_offset) / tsen_divisor
 */
static struct tsen_config tsen_cfg = {
	.tsen_offset = 153400,
	.tsen_gain = 425,
	.tsen_divisor = 1000,
	.tsen_ready = 0,			/* set by ext_tsen_probe */
	.regs_base = (void *)MVEBU_AP_EXT_TSEN_BASE,
	.ptr_tsen_probe = ext_tsen_probe,
	.ptr_tsen_read = ext_tsen_read
};
/* Return the platform thermal sensor configuration */
struct tsen_config *marvell_thermal_config_get(void)
{
	return &tsen_cfg;
}
plat/marvell/common/aarch64/marvell_common.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <mmio.h>
#include <plat_marvell.h>
#include <platform_def.h>
#include <xlat_tables.h>
/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_marvell_get_mmap
/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 *
 * The whole SRAM is first mapped RW, then the code and rodata sub-ranges are
 * re-mapped with stricter attributes; finally the platform mmap entries are
 * added and the translation tables are generated.
 */
void marvell_setup_page_tables(uintptr_t total_base,
			       size_t total_size,
			       uintptr_t code_start,
			       uintptr_t code_limit,
			       uintptr_t rodata_start,
			       uintptr_t rodata_limit
#if USE_COHERENT_MEM
			       ,
			       uintptr_t coh_start,
			       uintptr_t coh_limit
#endif
			       )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
		(void *)total_base, (void *)(total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *)code_start, (void *)code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *)rodata_start, (void *)rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region */
	VERBOSE("Coherent region: %p - %p\n",
		(void *)coh_start, (void *)coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_marvell_get_mmap());

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
/* Weak default (see #pragma weak above): non-secure image entry point */
unsigned long plat_get_ns_image_entrypoint(void)
{
	return PLAT_MARVELL_NS_IMAGE_OFFSET;
}
/*****************************************************************************
 * Gets SPSR for BL32 entry
 *****************************************************************************
 */
uint32_t marvell_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL32 image.
	 */
	return 0;
}
/*****************************************************************************
 * Gets SPSR for BL33 entry
 *
 * BL33 is entered at EL2 when the core implements it, otherwise at EL1,
 * with SP_ELx selected and all exceptions masked.
 *****************************************************************************
 */
uint32_t marvell_get_spsr_for_bl33_entry(void)
{
	unsigned long el2_implemented;
	unsigned int target_mode;

	/* Figure out what mode we enter the non-secure world in */
	el2_implemented = (read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT) &
			  ID_AA64PFR0_ELX_MASK;
	target_mode = el2_implemented ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	return SPSR_64(target_mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
}
/*****************************************************************************
 * Returns ARM platform specific memory map regions.
 * Weak default (see #pragma weak above) - returns the plat_marvell_mmap
 * table defined by the platform.
 *****************************************************************************
 */
const mmap_region_t *plat_marvell_get_mmap(void)
{
	return plat_marvell_mmap;
}
plat/marvell/common/aarch64/marvell_helpers.S
0 → 100644
View file @
ba0248b5
/*
*
Copyright
(
C
)
2018
Marvell
International
Ltd
.
*
*
SPDX
-
License
-
Identifier
:
BSD
-
3
-
Clause
*
https
:
//
spdx
.
org
/
licenses
*/
#include <asm_macros.S>
#include <cortex_a72.h>
#include <marvell_def.h>
#include <platform_def.h>
#ifndef PLAT_a3700
#include <ccu.h>
#include <cache_llc.h>
#endif
.
weak
plat_marvell_calc_core_pos
.
weak
plat_my_core_pos
.
globl
plat_crash_console_init
.
globl
plat_crash_console_putc
.
globl
platform_mem_init
.
globl
disable_mmu_dcache
.
globl
invalidate_tlb_all
.
globl
platform_unmap_sram
.
globl
disable_sram
.
globl
disable_icache
.
globl
invalidate_icache_all
.
globl
marvell_exit_bootrom
.
globl
ca72_l2_enable_unique_clean
	/* -----------------------------------------------------
	 *  unsigned int plat_my_core_pos(void)
	 *  This function uses the plat_marvell_calc_core_pos()
	 *  definition to get the index of the calling CPU.
	 * -----------------------------------------------------
	 */
func plat_my_core_pos
	/* Fetch this CPU's MPIDR and tail-call the linear-index helper */
	mrs	x0, mpidr_el1
	b	plat_marvell_calc_core_pos
endfunc plat_my_core_pos
	/* -----------------------------------------------------
	 *  unsigned int plat_marvell_calc_core_pos(uint64_t mpidr)
	 *  Helper function to calculate the core position.
	 *  With this function: CorePos = (ClusterId * 2) +
	 *                                CoreId
	 * -----------------------------------------------------
	 */
func plat_marvell_calc_core_pos
	/* x1 = CoreId (Aff0), x0 = cluster bits (Aff1 at bit 8) */
	and	x1, x0, #MPIDR_CPU_MASK
	and	x0, x0, #MPIDR_CLUSTER_MASK
	/* Shifting the cluster field right by 7 yields ClusterId * 2 */
	add	x0, x1, x0, LSR #7
	ret
endfunc plat_marvell_calc_core_pos
	/* ---------------------------------------------
	 * int plat_crash_console_init(void)
	 * Function to initialize the crash console
	 * without a C Runtime to print crash report.
	 * Clobber list : x0, x1, x2
	 * ---------------------------------------------
	 */
func plat_crash_console_init
	/* Pass base address, input clock and baud rate to the core driver */
	mov_imm	x0, PLAT_MARVELL_CRASH_UART_BASE
	mov_imm	x1, PLAT_MARVELL_CRASH_UART_CLK_IN_HZ
	mov_imm	x2, MARVELL_CONSOLE_BAUDRATE
	b	console_core_init
endfunc plat_crash_console_init
	/* ---------------------------------------------
	 * int plat_crash_console_putc(int c)
	 * Function to print a character on the crash
	 * console without a C Runtime.
	 * Clobber list : x1, x2
	 * ---------------------------------------------
	 */
func plat_crash_console_putc
	/* Character is already in w0; supply the UART base and tail-call */
	mov_imm	x1, PLAT_MARVELL_CRASH_UART_BASE
	b	console_core_putc
endfunc plat_crash_console_putc
	/* ---------------------------------------------------------------------
	 * We don't need to carry out any memory initialization on Marvell
	 * platforms. The Secure RAM is accessible straight away.
	 * ---------------------------------------------------------------------
	 */
func platform_mem_init
	ret
endfunc platform_mem_init
	/* -----------------------------------------------------
	 * Disable dcache and MMU at EL3
	 * (only the M and C bits of SCTLR_EL3 are cleared;
	 *  the icache is handled separately by disable_icache)
	 * -----------------------------------------------------
	 */
func disable_mmu_dcache
	mrs	x0, sctlr_el3
	bic	x0, x0, 0x1		/* M bit - MMU */
	bic	x0, x0, 0x4		/* C bit - Dcache L1 & L2 */
	msr	sctlr_el3, x0
	isb
	/* NOTE(review): branch to the very next instruction — presumably a
	 * deliberate pipeline drain after the SCTLR write; confirm intent */
	b	mmu_off
mmu_off:
	ret
endfunc disable_mmu_dcache
	/* -----------------------------------------------------
	 * Invalidate all EL3 TLB entries
	 * -----------------------------------------------------
	 */
func invalidate_tlb_all
	tlbi	alle3
	/* Ensure the invalidation completes before any later translation */
	dsb	sy
	isb
	ret
endfunc invalidate_tlb_all
	/* -----------------------------------------------------
	 * Disable the i cache
	 * -----------------------------------------------------
	 */
func disable_icache
	mrs	x0, sctlr_el3
	bic	x0, x0, 0x1000	/* I bit - Icache L1 & L2 */
	msr	sctlr_el3, x0
	isb
	ret
endfunc disable_icache
	/* -----------------------------------------------------
	 * Invalidate all of the i caches
	 * (IC IALLUIS: invalidate to Point of Unification,
	 *  Inner Shareable)
	 * -----------------------------------------------------
	 */
func invalidate_icache_all
	ic	ialluis
	isb	sy
	ret
endfunc invalidate_icache_all
	/* -----------------------------------------------------
	 * Clear the SRAM enabling bit to unmap SRAM
	 * -----------------------------------------------------
	 */
func platform_unmap_sram
	/* Writing zero to the CCU SRAM window control register unmaps it */
	ldr	x0, =CCU_SRAM_WIN_CR
	str	wzr, [x0]
	ret
endfunc platform_unmap_sram
	/* -----------------------------------------------------
	 * Disable the SRAM
	 * -----------------------------------------------------
	 */
func disable_sram
	/* Disable the line lockings. They must be disabled explicitly
	 * or the OS will have problems using the cache */
	ldr	x1, =MASTER_LLC_TC0_LOCK
	str	wzr, [x1]

	/* Invalidate all ways */
	ldr	w1, =LLC_WAY_MASK
	ldr	x0, =MASTER_L2X0_INV_WAY
	str	w1, [x0]

	/* Finally disable LLC */
	ldr	x0, =MASTER_LLC_CTRL
	str	wzr, [x0]

	ret
endfunc disable_sram
	/* -----------------------------------------------------
	 * Operation when exit bootROM:
	 * Disable the MMU
	 * Disable and invalidate the dcache
	 * Unmap and disable the SRAM
	 * Disable and invalidate the icache
	 * Finally jump to the address passed in x0
	 * -----------------------------------------------------
	 */
func marvell_exit_bootrom
	/* Save the system restore address
	 * NOTE(review): x28 is clobbered without being preserved — assumes
	 * the caller never returns here; confirm against the boot flow */
	mov	x28, x0

	/* Close the caches and MMU */
	bl	disable_mmu_dcache

	/*
	 * There is nothing important in the caches now,
	 * so invalidate them instead of cleaning.
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range
	bl	invalidate_tlb_all

	/*
	 * Clean the memory mapping of SRAM
	 * the DDR mapping will remain to enable boot image to execute
	 */
	bl	platform_unmap_sram

	/* Disable the SRAM */
	bl	disable_sram

	/* Disable and invalidate icache */
	bl	disable_icache
	bl	invalidate_icache_all

	/* Jump to the saved restore address with caches/MMU off */
	mov	x0, x28
	br	x0
endfunc marvell_exit_bootrom
/*
 * Enable L2 UniqueClean evictions with data
 * (sets the corresponding bit in the Cortex-A72 L2ACTLR_EL1 register)
 */
func ca72_l2_enable_unique_clean
	mrs	x0, CORTEX_A72_L2ACTLR_EL1
	orr	x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
	msr	CORTEX_A72_L2ACTLR_EL1, x0
	ret
endfunc ca72_l2_enable_unique_clean
plat/marvell/common/marvell_bl1_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <bl1.h>
#include <bl1/bl1_private.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_marvell.h>
#include <sp805.h>
/* Weak definitions may be overridden in specific Marvell standard platform */
#pragma weak bl1_early_platform_setup
#pragma weak bl1_plat_arch_setup
#pragma weak bl1_platform_setup
#pragma weak bl1_plat_sec_mem_layout

/* Data structure which holds the extents of the RAM for BL1*/
static meminfo_t bl1_ram_layout;

/* Expose BL1's view of the trusted RAM layout to the generic BL1 code */
meminfo_t *bl1_plat_sec_mem_layout(void)
{
	return &bl1_ram_layout;
}
/*
 * BL1 specific platform actions shared between Marvell standard platforms.
 * Brings up the boot console and records the trusted RAM layout,
 * reserving BL1's own footprint out of the free region.
 */
void marvell_bl1_early_platform_setup(void)
{
	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;

	/* Initialize the console to provide early debug support */
	console_init(PLAT_MARVELL_BOOT_UART_BASE,
		     PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
		     MARVELL_CONSOLE_BAUDRATE);

	/* Allow BL1 to see the whole Trusted RAM */
	bl1_ram_layout.total_base = MARVELL_BL_RAM_BASE;
	bl1_ram_layout.total_size = MARVELL_BL_RAM_SIZE;

	/* Calculate how much RAM BL1 is using and how much remains free */
	bl1_ram_layout.free_base = MARVELL_BL_RAM_BASE;
	bl1_ram_layout.free_size = MARVELL_BL_RAM_SIZE;
	/* Carve BL1's image [BL1_RAM_BASE, BL1_RAM_LIMIT) out of free RAM */
	reserve_mem(&bl1_ram_layout.free_base,
		    &bl1_ram_layout.free_size,
		    BL1_RAM_BASE,
		    bl1_size);
}
void
bl1_early_platform_setup
(
void
)
{
marvell_bl1_early_platform_setup
();
}
/*
 * Perform the very early platform specific architecture setup shared between
 * MARVELL standard platforms. This only does basic initialization. Later
 * architectural setup (bl1_arch_setup()) does not do anything platform
 * specific.
 */
void marvell_bl1_plat_arch_setup(void)
{
	/* Map all of trusted RAM plus BL1's RO/RO-data regions, then
	 * turn on the EL3 MMU with default (cached) attributes.
	 */
	marvell_setup_page_tables(bl1_ram_layout.total_base,
				  bl1_ram_layout.total_size,
				  BL1_RO_BASE,
				  BL1_RO_LIMIT,
				  BL1_RO_DATA_BASE,
				  BL1_RO_DATA_END
#if USE_COHERENT_MEM
				  , BL_COHERENT_RAM_BASE,
				  BL_COHERENT_RAM_END
#endif
		);
	enable_mmu_el3(0);
}
void
bl1_plat_arch_setup
(
void
)
{
marvell_bl1_plat_arch_setup
();
}
/*
* Perform the platform specific architecture setup shared between
* MARVELL standard platforms.
*/
void
marvell_bl1_platform_setup
(
void
)
{
/* Initialise the IO layer and register platform IO devices */
plat_marvell_io_setup
();
}
void
bl1_platform_setup
(
void
)
{
marvell_bl1_platform_setup
();
}
/*
 * Hook invoked just before BL1 hands control to the next image.
 * Only used for EL3-payload boot flows; otherwise a no-op.
 */
void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
{
#ifdef EL3_PAYLOAD_BASE
	/*
	 * Program the EL3 payload's entry point address into the CPUs mailbox
	 * in order to release secondary CPUs from their holding pen and make
	 * them jump there.
	 */
	marvell_program_trusted_mailbox(ep_info->pc);
	dsbsy();
	sev();
#endif
}
plat/marvell/common/marvell_bl2_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <arch_helpers.h>
#include <bl_common.h>
#include <console.h>
#include <marvell_def.h>
#include <platform_def.h>
#include <plat_marvell.h>
#include <string.h>
/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

/*****************************************************************************
 * This structure represents the superset of information that is passed to
 * BL31, e.g. while passing control to it from BL2, bl31_params
 * and other platform specific parameters
 *****************************************************************************
 */
typedef struct bl2_to_bl31_params_mem {
	bl31_params_t bl31_params;		/* header handed to BL31 */
	image_info_t bl31_image_info;
	image_info_t bl32_image_info;
	image_info_t bl33_image_info;
	entry_point_info_t bl33_ep_info;
	entry_point_info_t bl32_ep_info;
	entry_point_info_t bl31_ep_info;
} bl2_to_bl31_params_mem_t;

/* Single static instance; zeroed and populated in bl2_plat_get_bl31_params */
static bl2_to_bl31_params_mem_t bl31_params_mem;

/* Weak definitions may be overridden in specific MARVELL standard platform */
#pragma weak bl2_early_platform_setup
#pragma weak bl2_platform_setup
#pragma weak bl2_plat_arch_setup
#pragma weak bl2_plat_sec_mem_layout
#pragma weak bl2_plat_get_bl31_params
#pragma weak bl2_plat_get_bl31_ep_info
#pragma weak bl2_plat_flush_bl31_params
#pragma weak bl2_plat_set_bl31_ep_info
#pragma weak bl2_plat_get_scp_bl2_meminfo
#pragma weak bl2_plat_get_bl32_meminfo
#pragma weak bl2_plat_set_bl32_ep_info
#pragma weak bl2_plat_get_bl33_meminfo
#pragma weak bl2_plat_set_bl33_ep_info
/* Expose BL2's view of the trusted SRAM layout to the generic BL2 code */
meminfo_t *bl2_plat_sec_mem_layout(void)
{
	return &bl2_tzram_layout;
}
/*****************************************************************************
 * This function assigns a pointer to the memory that the platform has kept
 * aside to pass platform specific and trusted firmware related information
 * to BL31. This memory is allocated by allocating memory to
 * bl2_to_bl31_params_mem_t structure which is a superset of all the
 * structure whose information is passed to BL31
 * NOTE: This function should be called only once and should be done
 * before generating params to BL31
 *****************************************************************************
 */
bl31_params_t *bl2_plat_get_bl31_params(void)
{
	bl31_params_t *bl2_to_bl31_params;

	/*
	 * Initialise the memory for all the arguments that needs to
	 * be passed to BL31
	 */
	memset(&bl31_params_mem, 0, sizeof(bl2_to_bl31_params_mem_t));

	/* Assign memory for TF related information */
	bl2_to_bl31_params = &bl31_params_mem.bl31_params;
	SET_PARAM_HEAD(bl2_to_bl31_params, PARAM_BL31, VERSION_1, 0);

	/* Fill BL31 related information */
	bl2_to_bl31_params->bl31_image_info = &bl31_params_mem.bl31_image_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl31_image_info,
		       PARAM_IMAGE_BINARY, VERSION_1, 0);

	/* Fill BL32 related information if it exists */
#if BL32_BASE
	bl2_to_bl31_params->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_ep_info, PARAM_EP,
		       VERSION_1, 0);
	bl2_to_bl31_params->bl32_image_info = &bl31_params_mem.bl32_image_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl32_image_info, PARAM_IMAGE_BINARY,
		       VERSION_1, 0);
#endif

	/* Fill BL33 related information */
	bl2_to_bl31_params->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_ep_info, PARAM_EP,
		       VERSION_1, 0);

	/* BL33 expects to receive the primary CPU MPID (through x0) */
	bl2_to_bl31_params->bl33_ep_info->args.arg0 = 0xffff & read_mpidr();

	bl2_to_bl31_params->bl33_image_info = &bl31_params_mem.bl33_image_info;
	SET_PARAM_HEAD(bl2_to_bl31_params->bl33_image_info, PARAM_IMAGE_BINARY,
		       VERSION_1, 0);

	return bl2_to_bl31_params;
}
/* Flush the TF params and the TF plat params */
void bl2_plat_flush_bl31_params(void)
{
	/* BL31 reads these with the MMU off, so push them to memory */
	flush_dcache_range((unsigned long)&bl31_params_mem,
			   sizeof(bl2_to_bl31_params_mem_t));
}
/*****************************************************************************
 * This function returns a pointer to the shared memory that the platform
 * has kept to point to entry point information of BL31 to BL2
 *****************************************************************************
 */
struct entry_point_info *bl2_plat_get_bl31_ep_info(void)
{
#if DEBUG
	/* Magic value checked by BL31 in debug builds (see bl31 setup) */
	bl31_params_mem.bl31_ep_info.args.arg1 = MARVELL_BL31_PLAT_PARAM_VAL;
#endif

	return &bl31_params_mem.bl31_ep_info;
}
/*****************************************************************************
 * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
 * in x0. This memory layout is sitting at the base of the free trusted SRAM.
 * Copy it to a safe location before its reclaimed by later BL2 functionality.
 *****************************************************************************
 */
void marvell_bl2_early_platform_setup(meminfo_t *mem_layout)
{
	/* Initialize the console to provide early debug support */
	console_init(PLAT_MARVELL_BOOT_UART_BASE,
		     PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
		     MARVELL_CONSOLE_BAUDRATE);

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	/* Initialise the IO layer and register platform IO devices */
	plat_marvell_io_setup();
}
/* Weak default: delegate to the shared Marvell implementation */
void bl2_early_platform_setup(meminfo_t *mem_layout)
{
	marvell_bl2_early_platform_setup(mem_layout);
}
void
bl2_platform_setup
(
void
)
{
/* Nothing to do */
}
/*****************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this is only initializes the mmu in a quick and dirty way.
 *****************************************************************************
 */
void marvell_bl2_plat_arch_setup(void)
{
	/* Map BL2's SRAM plus the common code/RO-data regions and enable
	 * the EL1 MMU (BL2 runs at S-EL1 on these platforms).
	 */
	marvell_setup_page_tables(bl2_tzram_layout.total_base,
				  bl2_tzram_layout.total_size,
				  BL_CODE_BASE,
				  BL_CODE_END,
				  BL_RO_DATA_BASE,
				  BL_RO_DATA_END
#if USE_COHERENT_MEM
				  , BL_COHERENT_RAM_BASE,
				  BL_COHERENT_RAM_END
#endif
		);
	enable_mmu_el1(0);
}
void
bl2_plat_arch_setup
(
void
)
{
marvell_bl2_plat_arch_setup
();
}
/*****************************************************************************
 * Populate the extents of memory available for loading SCP_BL2 (if used),
 * i.e. anywhere in trusted RAM as long as it doesn't overwrite BL2.
 *****************************************************************************
 */
void bl2_plat_get_scp_bl2_meminfo(meminfo_t *scp_bl2_meminfo)
{
	*scp_bl2_meminfo = bl2_tzram_layout;
}
/*****************************************************************************
 * Before calling this function BL31 is loaded in memory and its entrypoint
 * is set by load_image. This is a placeholder for the platform to change
 * the entrypoint of BL31 and set SPSR and security state.
 * On MARVELL std. platforms we only set the security state of the entrypoint
 *****************************************************************************
 */
void bl2_plat_set_bl31_ep_info(image_info_t *bl31_image_info,
			       entry_point_info_t *bl31_ep_info)
{
	SET_SECURITY_STATE(bl31_ep_info->h.attr, SECURE);
	/* BL31 always enters at EL3h with all exceptions masked */
	bl31_ep_info->spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
				     DISABLE_ALL_EXCEPTIONS);
}
/*****************************************************************************
* Populate the extents of memory available for loading BL32
*****************************************************************************
*/
#ifdef BL32_BASE
void
bl2_plat_get_bl32_meminfo
(
meminfo_t
*
bl32_meminfo
)
{
/*
* Populate the extents of memory available for loading BL32.
*/
bl32_meminfo
->
total_base
=
BL32_BASE
;
bl32_meminfo
->
free_base
=
BL32_BASE
;
bl32_meminfo
->
total_size
=
(
TRUSTED_DRAM_BASE
+
TRUSTED_DRAM_SIZE
)
-
BL32_BASE
;
bl32_meminfo
->
free_size
=
(
TRUSTED_DRAM_BASE
+
TRUSTED_DRAM_SIZE
)
-
BL32_BASE
;
}
#endif
/*****************************************************************************
 * Before calling this function BL32 is loaded in memory and its entrypoint
 * is set by load_image. This is a placeholder for the platform to change
 * the entrypoint of BL32 and set SPSR and security state.
 * On MARVELL std. platforms we only set the security state of the entrypoint
 *****************************************************************************
 */
void bl2_plat_set_bl32_ep_info(image_info_t *bl32_image_info,
			       entry_point_info_t *bl32_ep_info)
{
	SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
	/* SPSR is 0 — the SPD programs the real value before BL32 entry */
	bl32_ep_info->spsr = marvell_get_spsr_for_bl32_entry();
}
/*****************************************************************************
 * Before calling this function BL33 is loaded in memory and its entrypoint
 * is set by load_image. This is a placeholder for the platform to change
 * the entrypoint of BL33 and set SPSR and security state.
 * On MARVELL std. platforms we only set the security state of the entrypoint
 *****************************************************************************
 */
void bl2_plat_set_bl33_ep_info(image_info_t *image,
			       entry_point_info_t *bl33_ep_info)
{
	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
	/* Entry EL (EL2 if implemented, else EL1) is probed at runtime */
	bl33_ep_info->spsr = marvell_get_spsr_for_bl33_entry();
}
/*****************************************************************************
 * Populate the extents of memory available for loading BL33
 *****************************************************************************
 */
void bl2_plat_get_bl33_meminfo(meminfo_t *bl33_meminfo)
{
	/* BL33 may be loaded anywhere in non-secure DRAM */
	bl33_meminfo->total_base = MARVELL_DRAM_BASE;
	bl33_meminfo->total_size = MARVELL_DRAM_SIZE;
	bl33_meminfo->free_base = MARVELL_DRAM_BASE;
	bl33_meminfo->free_size = MARVELL_DRAM_SIZE;
}
plat/marvell/common/marvell_bl31_setup.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <arch.h>
#include <assert.h>
#include <console.h>
#include <debug.h>
#include <marvell_def.h>
#include <marvell_plat_priv.h>
#include <plat_marvell.h>
#include <platform.h>
#ifdef USE_CCI
#include <cci.h>
#endif
/*
 * The next 3 constants identify the extents of the code, RO data region and the
 * limit of the BL31 image. These addresses are used by the MMU setup code and
 * therefore they must be page-aligned. It is the responsibility of the linker
 * script to ensure that __RO_START__, __RO_END__ & __BL31_END__ linker symbols
 * refer to page-aligned addresses.
 */
#define BL31_END (unsigned long)(&__BL31_END__)

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak plat_get_syscnt_freq2
/*****************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 *****************************************************************************
 */
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	next_image_info = (type == NON_SECURE) ?
			  &bl33_image_ep_info : &bl32_image_ep_info;

	return next_image_info;
}
/*****************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & S-EL3 in BL1) before they are lost (potentially). This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 *****************************************************************************
 */
void marvell_bl31_early_platform_setup(bl31_params_t *from_bl2,
				       void *plat_params_from_bl2)
{
	/* Initialize the console to provide early debug support */
	console_init(PLAT_MARVELL_BOOT_UART_BASE,
		     PLAT_MARVELL_BOOT_UART_CLK_IN_HZ,
		     MARVELL_CONSOLE_BAUDRATE);

#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert(from_bl2 == NULL);
	assert(plat_params_from_bl2 == NULL);

#ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = marvell_get_spsr_for_bl32_entry();
#endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
	bl33_image_ep_info.spsr = marvell_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
#else
	/*
	 * Check params passed from BL2 should not be NULL,
	 */
	assert(from_bl2 != NULL);
	assert(from_bl2->h.type == PARAM_BL31);
	assert(from_bl2->h.version >= VERSION_1);

	/*
	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
	assert(((unsigned long long)plat_params_from_bl2) ==
	       MARVELL_BL31_PLAT_PARAM_VAL);

	/*
	 * Copy BL32 (if populated by BL2) and BL33 entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	if (from_bl2->bl32_ep_info)
		bl32_image_ep_info = *from_bl2->bl32_ep_info;
	bl33_image_ep_info = *from_bl2->bl33_ep_info;
#endif
}
/* Weak default: shared Marvell setup plus optional CCI bring-up */
void bl31_early_platform_setup(bl31_params_t *from_bl2,
			       void *plat_params_from_bl2)
{
	marvell_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);

#ifdef USE_CCI
	/*
	 * Initialize CCI for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_marvell_interconnect_init();

	/*
	 * Enable CCI coherency for the primary CPU's cluster.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_marvell_interconnect_enter_coherency();
#endif
}
/*****************************************************************************
* Perform any BL31 platform setup common to ARM standard platforms
*****************************************************************************
*/
void
marvell_bl31_platform_setup
(
void
)
{
/* Initialize the GIC driver, cpu and distributor interfaces */
plat_marvell_gic_driver_init
();
plat_marvell_gic_init
();
/* For Armada-8k-plus family, the SoC includes more than
* a single AP die, but the default die that boots is AP #0.
* For other families there is only one die (#0).
* Initialize psci arch from die 0
*/
marvell_psci_arch_init
(
0
);
}
/*****************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 *****************************************************************************
 */
void marvell_bl31_plat_runtime_setup(void)
{
	/* Initialize the runtime console (may differ from the boot UART) */
	console_init(PLAT_MARVELL_BL31_RUN_UART_BASE,
		     PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ,
		     MARVELL_CONSOLE_BAUDRATE);
}
void
bl31_platform_setup
(
void
)
{
marvell_bl31_platform_setup
();
}
void
bl31_plat_runtime_setup
(
void
)
{
marvell_bl31_plat_runtime_setup
();
}
/*****************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 *****************************************************************************
 */
void marvell_bl31_plat_arch_setup(void)
{
	marvell_setup_page_tables(BL31_BASE,
				  BL31_END - BL31_BASE,
				  BL_CODE_BASE,
				  BL_CODE_END,
				  BL_RO_DATA_BASE,
				  BL_RO_DATA_END
#if USE_COHERENT_MEM
				  , BL_COHERENT_RAM_BASE,
				  BL_COHERENT_RAM_END
#endif
		);

#if BL31_CACHE_DISABLE
	/* Build-time debug option: run BL31 with the dcache off */
	enable_mmu_el3(DISABLE_DCACHE);
	INFO("Cache is disabled in BL3\n");
#else
	enable_mmu_el3(0);
#endif
}
void
bl31_plat_arch_setup
(
void
)
{
marvell_bl31_plat_arch_setup
();
}
/* Return the frequency of the system counter (generic timer) in Hz */
unsigned int plat_get_syscnt_freq2(void)
{
	return PLAT_REF_CLK_IN_HZ;
}
plat/marvell/common/marvell_cci.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <cci.h>
#include <plat_marvell.h>
/* Slave interface indices for the two CPU clusters on the CCI */
static const int cci_map[] = {
	PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX,
	PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX
};

/****************************************************************************
 * The following functions are defined as weak to allow a platform to override
 * the way ARM CCI driver is initialised and used.
 ****************************************************************************
 */
#pragma weak plat_marvell_interconnect_init
#pragma weak plat_marvell_interconnect_enter_coherency
#pragma weak plat_marvell_interconnect_exit_coherency
/****************************************************************************
 * Helper function to initialize ARM CCI driver.
 ****************************************************************************
 */
void plat_marvell_interconnect_init(void)
{
	cci_init(PLAT_MARVELL_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
}
/****************************************************************************
* Helper function to place current master into coherency
****************************************************************************
*/
void
plat_marvell_interconnect_enter_coherency
(
void
)
{
cci_enable_snoop_dvm_reqs
(
MPIDR_AFFLVL1_VAL
(
read_mpidr_el1
()));
}
/****************************************************************************
* Helper function to remove current master from coherency
****************************************************************************
*/
void
plat_marvell_interconnect_exit_coherency
(
void
)
{
cci_disable_snoop_dvm_reqs
(
MPIDR_AFFLVL1_VAL
(
read_mpidr_el1
()));
}
plat/marvell/common/marvell_common.mk
0 → 100644
View file @
ba0248b5
# Copyright (C) 2018 Marvell International Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
# https://spdx.org/licenses
# Base paths for the shared Marvell platform code and headers
MARVELL_PLAT_BASE		:= plat/marvell
MARVELL_PLAT_INCLUDE_BASE	:= include/plat/marvell

include $(MARVELL_PLAT_BASE)/version.mk
include $(MARVELL_PLAT_BASE)/marvell.mk

VERSION_STRING			+=(Marvell-${SUBVERSION})

SEPARATE_CODE_AND_RODATA	:= 1

# flag to switch from PLL to ARO
ARO_ENABLE			:= 0
$(eval $(call add_define,ARO_ENABLE))

# Enable/Disable LLC
LLC_ENABLE			:= 1
$(eval $(call add_define,LLC_ENABLE))

PLAT_INCLUDES		+=	-I. -Iinclude/common/tbbr		\
				-I$(MARVELL_PLAT_INCLUDE_BASE)/common	\
				-I$(MARVELL_PLAT_INCLUDE_BASE)/common/aarch64

# Sources shared by every BL stage (page tables + common helpers)
PLAT_BL_COMMON_SOURCES	+=	lib/xlat_tables/xlat_tables_common.c	\
				lib/xlat_tables/aarch64/xlat_tables.c	\
				$(MARVELL_PLAT_BASE)/common/aarch64/marvell_common.c \
				$(MARVELL_PLAT_BASE)/common/aarch64/marvell_helpers.S

BL1_SOURCES		+=	drivers/delay_timer/delay_timer.c	\
				drivers/io/io_fip.c			\
				drivers/io/io_memmap.c			\
				drivers/io/io_storage.c			\
				$(MARVELL_PLAT_BASE)/common/marvell_bl1_setup.c \
				$(MARVELL_PLAT_BASE)/common/marvell_io_storage.c \
				$(MARVELL_PLAT_BASE)/common/plat_delay_timer.c

ifdef EL3_PAYLOAD_BASE
# Need the arm_program_trusted_mailbox() function to release secondary CPUs from
# their holding pen
endif

BL2_SOURCES		+=	drivers/io/io_fip.c			\
				drivers/io/io_memmap.c			\
				drivers/io/io_storage.c			\
				$(MARVELL_PLAT_BASE)/common/marvell_bl2_setup.c \
				$(MARVELL_PLAT_BASE)/common/marvell_io_storage.c

BL31_SOURCES		+=	$(MARVELL_PLAT_BASE)/common/marvell_bl31_setup.c \
				$(MARVELL_PLAT_BASE)/common/marvell_pm.c \
				$(MARVELL_PLAT_BASE)/common/marvell_topology.c \
				plat/common/plat_psci_common.c	\
				$(MARVELL_PLAT_BASE)/common/plat_delay_timer.c \
				drivers/delay_timer/delay_timer.c

# PSCI functionality
$(eval $(call add_define,CONFIG_ARM64))

# MSS (SCP) build
ifeq (${MSS_SUPPORT}, 1)
include $(MARVELL_PLAT_BASE)/common/mss/mss_common.mk
endif

# Building the FIP also requires the Marvell flash image target
fip: mrvl_flash
plat/marvell/common/marvell_ddr_info.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <platform_def.h>
#include <ddr_info.h>
#include <mmio.h>
#define DRAM_CH0_MMAP_LOW_REG(iface, cs, base) \
(base + DRAM_CH0_MMAP_LOW_OFFSET + (iface) * 0x10000 + (cs) * 0x8)
#define DRAM_CH0_MMAP_HIGH_REG(iface, cs, base) \
(DRAM_CH0_MMAP_LOW_REG(iface, cs, base) + 4)
#define DRAM_CS_VALID_ENABLED_MASK 0x1
#define DRAM_AREA_LENGTH_OFFS 16
#define DRAM_AREA_LENGTH_MASK (0x1f << DRAM_AREA_LENGTH_OFFS)
#define DRAM_START_ADDRESS_L_OFFS 23
#define DRAM_START_ADDRESS_L_MASK \
(0x1ff << DRAM_START_ADDRESS_L_OFFS)
#define DRAM_START_ADDR_HTOL_OFFS 32
#define DRAM_MAX_CS_NUM 2
#define DRAM_CS_ENABLED(iface, cs, base) \
(mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
DRAM_CS_VALID_ENABLED_MASK)
#define GET_DRAM_REGION_SIZE_CODE(iface, cs, base) \
(mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
DRAM_AREA_LENGTH_MASK) >> DRAM_AREA_LENGTH_OFFS
/* Mapping between DDR area length and real DDR size is specific and looks like
 * below:
* 0 => 384 MB
* 1 => 768 MB
* 2 => 1536 MB
* 3 => 3 GB
* 4 => 6 GB
*
* 7 => 8 MB
* 8 => 16 MB
* 9 => 32 MB
* 10 => 64 MB
* 11 => 128 MB
* 12 => 256 MB
* 13 => 512 MB
* 14 => 1 GB
* 15 => 2 GB
* 16 => 4 GB
* 17 => 8 GB
* 18 => 16 GB
* 19 => 32 GB
* 20 => 64 GB
* 21 => 128 GB
* 22 => 256 GB
* 23 => 512 GB
* 24 => 1 TB
* 25 => 2 TB
* 26 => 4 TB
*
* to calculate real size we need to use two different formulas:
* -- GET_DRAM_REGION_SIZE_ODD for values 0-4 (DRAM_REGION_SIZE_ODD)
* -- GET_DRAM_REGION_SIZE_EVEN for values 7-26 (DRAM_REGION_SIZE_EVEN)
* using mentioned formulas we cover whole mapping between "Area length" value
* and real size (see above mapping).
*/
#define DRAM_REGION_SIZE_EVEN(C) (((C) >= 7) && ((C) <= 26))
#define GET_DRAM_REGION_SIZE_EVEN(C) ((uint64_t)1 << ((C) + 16))
#define DRAM_REGION_SIZE_ODD(C) ((C) <= 4)
#define GET_DRAM_REGION_SIZE_ODD(C) ((uint64_t)0x18000000 << (C))
/*
 * Sum the sizes of all enabled DRAM chip-selects on all interfaces of the
 * AP whose register base is 'ap_base_addr'. Returns the total size in
 * bytes, or 0 if any CS reports an area-length code outside the two
 * supported decode ranges (see the mapping table above).
 */
uint64_t mvebu_get_dram_size(uint64_t ap_base_addr)
{
	uint64_t mem_size = 0;
	uint8_t region_code;
	uint8_t cs, iface;

	for (iface = 0; iface < DRAM_MAX_IFACE; iface++) {
		for (cs = 0; cs < DRAM_MAX_CS_NUM; cs++) {

			/* Exit loop on first disabled DRAM CS */
			if (!DRAM_CS_ENABLED(iface, cs, ap_base_addr))
				break;

			/* Decode area length for current CS
			 * from register value
			 */
			region_code = GET_DRAM_REGION_SIZE_CODE(iface, cs,
								ap_base_addr);

			if (DRAM_REGION_SIZE_EVEN(region_code)) {
				/* Codes 7..26: size is a power of two */
				mem_size +=
					GET_DRAM_REGION_SIZE_EVEN(region_code);
			} else if (DRAM_REGION_SIZE_ODD(region_code)) {
				/* Codes 0..4: size is 384MB << code */
				mem_size +=
					GET_DRAM_REGION_SIZE_ODD(region_code);
			} else {
				WARN("%s: Invalid mem region (0x%x) CS#%d\n",
				     __func__, region_code, cs);
				return 0;
			}
		}
	}

	return mem_size;
}
plat/marvell/common/marvell_gicv2.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <gicv2.h>
#include <plat_marvell.h>
#include <platform.h>
#include <platform_def.h>
/*
* The following functions are defined as weak to allow a platform to override
* the way the GICv2 driver is initialised and used.
*/
#pragma weak plat_marvell_gic_driver_init
#pragma weak plat_marvell_gic_init
/*
 * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
 * interrupts.
 */
/* Interrupt configuration table handed to the GICv2 driver below */
static const interrupt_prop_t marvell_interrupt_props[] = {
	PLAT_MARVELL_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
	PLAT_MARVELL_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
};
/* Per-core interrupt target-mask storage, registered with the GICv2
 * driver through marvell_gic_data below
 */
static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
/*
 * Ideally `marvell_gic_data` structure definition should be a `const` but it is
 * kept as modifiable for overwriting with different GICD and GICC base when
 * running on FVP with VE memory map.
 *
 * NOTE(review): the FVP/VE reference looks inherited from Arm platform
 * code — confirm it actually applies to Marvell platforms.
 */
static gicv2_driver_data_t marvell_gic_data = {
	.gicd_base = PLAT_MARVELL_GICD_BASE,
	.gicc_base = PLAT_MARVELL_GICC_BASE,
	.interrupt_props = marvell_interrupt_props,
	.interrupt_props_num = ARRAY_SIZE(marvell_interrupt_props),
	.target_masks = target_mask_array,
	.target_masks_num = ARRAY_SIZE(target_mask_array),
};
/*
 * ARM common helper to initialize the GICv2 only driver.
 */
void plat_marvell_gic_driver_init(void)
{
	/* Hand the static platform configuration to the GICv2 driver */
	gicv2_driver_init(&marvell_gic_data);
}
void
plat_marvell_gic_init
(
void
)
{
gicv2_distif_init
();
gicv2_pcpu_distif_init
();
gicv2_set_pe_target_mask
(
plat_my_core_pos
());
gicv2_cpuif_enable
();
}
plat/marvell/common/marvell_io_storage.c
0 → 100644
View file @
ba0248b5
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <assert.h>
#include <bl_common.h>
/* For ARRAY_SIZE */
#include <debug.h>
#include <firmware_image_package.h>
#include <io_driver.h>
#include <io_fip.h>
#include <io_memmap.h>
#include <io_storage.h>
#include <platform_def.h>
#include <string.h>
/* IO devices */
/* FIP driver connector and its cached device handle */
static const io_dev_connector_t *fip_dev_con;
static uintptr_t fip_dev_handle;
/* Memory-mapped driver connector and its cached device handle */
static const io_dev_connector_t *memmap_dev_con;
static uintptr_t memmap_dev_handle;
/* Location and maximum size of the FIP on the memory-mapped device */
static const io_block_spec_t fip_block_spec = {
	.offset = PLAT_MARVELL_FIP_BASE,
	.length = PLAT_MARVELL_FIP_MAX_SIZE
};
/* UUIDs used to locate each firmware image inside the FIP */
static const io_uuid_spec_t bl2_uuid_spec = {
	.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
};

static const io_uuid_spec_t scp_bl2_uuid_spec = {
	.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
};

static const io_uuid_spec_t bl31_uuid_spec = {
	.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
};

static const io_uuid_spec_t bl32_uuid_spec = {
	.uuid = UUID_SECURE_PAYLOAD_BL32,
};

static const io_uuid_spec_t bl33_uuid_spec = {
	.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
};
static int open_fip(const uintptr_t spec);
static int open_memmap(const uintptr_t spec);

/* One load-policy entry: where an image lives and how to reach it */
struct plat_io_policy {
	uintptr_t *dev_handle;	/* device the image is loaded from */
	uintptr_t image_spec;	/* driver-specific image locator */
	int (*check)(const uintptr_t spec);	/* probe that the source is usable */
};
/* By default, Marvell platforms load images from the FIP */
/* Indexed by image ID; the FIP itself is reached through memmap,
 * every other image is extracted from the FIP by UUID.
 */
static const struct plat_io_policy policies[] = {
	[FIP_IMAGE_ID] = {
		&memmap_dev_handle,
		(uintptr_t)&fip_block_spec,
		open_memmap
	},
	[BL2_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl2_uuid_spec,
		open_fip
	},
	[SCP_BL2_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&scp_bl2_uuid_spec,
		open_fip
	},
	[BL31_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl31_uuid_spec,
		open_fip
	},
	[BL32_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl32_uuid_spec,
		open_fip
	},
	[BL33_IMAGE_ID] = {
		&fip_dev_handle,
		(uintptr_t)&bl33_uuid_spec,
		open_fip
	},
};
/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak plat_marvell_io_setup
#pragma weak plat_marvell_get_alt_image_source
/*
 * Check that a Firmware Image Package is available and that the given
 * image spec can be opened on it.  Returns 0 on success, otherwise the
 * error from the failing IO call.
 */
static int open_fip(const uintptr_t spec)
{
	uintptr_t img_handle;
	int rc;

	/* See if a Firmware Image Package is available */
	rc = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
	if (rc != 0)
		return rc;

	rc = io_open(fip_dev_handle, spec, &img_handle);
	if (rc != 0)
		return rc;

	VERBOSE("Using FIP\n");
	io_close(img_handle);
	return rc;
}
/*
 * Check that the memory-mapped IO device can be initialised and that
 * the given image spec can be opened on it.  Returns 0 on success,
 * otherwise the error from the failing IO call.
 */
static int open_memmap(const uintptr_t spec)
{
	uintptr_t img_handle;
	int rc;

	rc = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
	if (rc != 0)
		return rc;

	rc = io_open(memmap_dev_handle, spec, &img_handle);
	if (rc != 0)
		return rc;

	VERBOSE("Using Memmap\n");
	io_close(img_handle);
	return rc;
}
/*
 * Register the FIP and memmap IO drivers and open a connection to each,
 * caching the device handles for later image loading.  Failures are
 * caught by assert() in debug builds and deliberately ignored in
 * release builds.
 */
void marvell_io_setup(void)
{
	int rc;

	/* Register the two IO drivers this platform loads images from */
	rc = register_io_dev_fip(&fip_dev_con);
	assert(rc == 0);

	rc = register_io_dev_memmap(&memmap_dev_con);
	assert(rc == 0);

	/* Open connections to devices and cache the handles */
	rc = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
	assert(rc == 0);

	rc = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
			 &memmap_dev_handle);
	assert(rc == 0);

	/* Ignore improbable errors in release builds */
	(void)rc;
}
void
plat_marvell_io_setup
(
void
)
{
marvell_io_setup
();
}
/*
 * Weak default platform hook: no alternative image source exists, so
 * any fallback lookup fails with -ENOENT.  All parameters are unused.
 */
int plat_marvell_get_alt_image_source(
	unsigned int image_id __attribute__((unused)),
	uintptr_t *dev_handle __attribute__((unused)),
	uintptr_t *image_spec __attribute__((unused)))
{
	/* By default do not try an alternative */
	return -ENOENT;
}
/*
* Return an IO device handle and specification which can be used to access
* an image. Use this to enforce platform load policy
*/
/*
 * Return an IO device handle and specification which can be used to
 * access an image.  Use this to enforce the platform load policy.
 *
 * image_id:   index into the policies[] table (asserted in range).
 * dev_handle: out, device handle for the image's source.
 * image_spec: out, driver-specific locator for the image.
 * Returns 0 on success, otherwise the failing probe's error code.
 */
int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
			  uintptr_t *image_spec)
{
	const struct plat_io_policy *policy;
	int rc;

	assert(image_id < ARRAY_SIZE(policies));

	policy = &policies[image_id];
	rc = policy->check(policy->image_spec);
	if (rc != 0) {
		/* Primary source failed; try the platform's fallback */
		VERBOSE("Trying alternative IO\n");
		return plat_marvell_get_alt_image_source(image_id, dev_handle,
							 image_spec);
	}

	*image_spec = policy->image_spec;
	*dev_handle = *(policy->dev_handle);
	return rc;
}
/*
 * See if a Firmware Image Package is available by checking whether its
 * table of contents is valid.  Returns 1 when valid, 0 otherwise.
 */
int marvell_io_is_toc_valid(void)
{
	return io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID) == 0;
}
Prev
1
2
3
4
5
6
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment