adam.huang / Arm Trusted Firmware / Commits / bc5e79cd

Unverified commit bc5e79cd, authored Jan 25, 2019 by Antonio Niño Díaz, committed by GitHub on Jan 25, 2019

Merge pull request #1776 from vwadekar/tf2.0-tegra-downstream-rebase-1.22.19

Tf2.0 tegra downstream rebase 1.22.19

Parents: ae478c26, 8ec45621
Changes: 33 files
plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
@@ -64,19 +64,17 @@
 #define MCA_ARG_FINISH_MASK		U(0xFF)

 /*******************************************************************************
- * Uncore PERFMON ARI struct
+ * Uncore PERFMON ARI macros
  ******************************************************************************/
 #define UNCORE_PERFMON_CMD_READ		U(0)
 #define UNCORE_PERFMON_CMD_WRITE	U(1)
 #define UNCORE_PERFMON_CMD_MASK		U(0xFF)
 #define UNCORE_PERFMON_CMD_SHIFT	U(24)
 #define UNCORE_PERFMON_UNIT_GRP_MASK	U(0xF)
 #define UNCORE_PERFMON_SELECTOR_MASK	U(0xF)
 #define UNCORE_PERFMON_REG_MASK		U(0xFF)
 #define UNCORE_PERFMON_CTR_MASK		U(0xFF)
 #define UNCORE_PERFMON_RESP_STATUS_MASK	U(0xFF)
 #define UNCORE_PERFMON_RESP_STATUS_SHIFT	U(24)

 /*******************************************************************************
  * Structure populated by arch specific code to export routines which perform
plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -35,8 +35,8 @@
 #define ARI_REQUEST_VALID_BIT		(1U << 8)
 #define ARI_EVT_MASK_STANDBYWFI_BIT	(1U << 7)

-/* default timeout (ms) to wait for ARI completion */
-#define ARI_MAX_RETRY_COUNT		2000
+/* default timeout (us) to wait for ARI completion */
+#define ARI_MAX_RETRY_COUNT		U(2000000)

 /*******************************************************************************
  * ARI helper functions
@@ -80,7 +80,7 @@ static inline void ari_clobber_response(uint32_t ari_base)
static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask,
		uint32_t req, uint32_t lo, uint32_t hi)
{
-	uint32_t retries = ARI_MAX_RETRY_COUNT;
+	uint32_t retries = (uint32_t)ARI_MAX_RETRY_COUNT;
	uint32_t status;
	int32_t ret = 0;
@@ -115,8 +115,8 @@ static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t r
		break;
	}

-	/* delay 1 ms */
-	mdelay(1);
+	/* delay 1 us */
+	udelay(1);

	/* decrement the retry count */
	retries--;
@@ -503,7 +503,7 @@ int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
	uint32_t val, req_status;
	uint8_t req_cmd;

-	req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT);
+	req_cmd = (uint8_t)(req & UNCORE_PERFMON_CMD_MASK);

	/* clean the previous response state */
	ari_clobber_response(ari_base);
@@ -533,7 +533,7 @@ int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
	 * For "read" commands get the data from the uncore
	 * perfmon registers
	 */
-	req_status >>= UNCORE_PERFMON_RESP_STATUS_SHIFT;
+	req_status &= UNCORE_PERFMON_RESP_STATUS_MASK;
	if ((req_status == 0U) && (req_cmd == UNCORE_PERFMON_CMD_READ)) {
		*data = ari_get_response_low(ari_base);
	}
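Both fixes read a field from the low byte with a mask instead of shifting bits [31:24] down. A small sketch of the decode helpers the fixed code implies (mask values copied from mce_private.h above; the helper names are illustrative):

#include <stdint.h>

#define UNCORE_PERFMON_CMD_MASK		0xFFU
#define UNCORE_PERFMON_RESP_STATUS_MASK	0xFFU

/* the command now comes from the low byte of the request word */
static inline uint8_t perfmon_req_cmd(uint64_t req)
{
	return (uint8_t)(req & UNCORE_PERFMON_CMD_MASK);
}

/* the response status is likewise masked, not shifted, before checking */
static inline uint32_t perfmon_resp_status(uint32_t resp)
{
	return resp & UNCORE_PERFMON_RESP_STATUS_MASK;
}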
plat/nvidia/tegra/soc/t186/plat_memctrl.c
@@ -4,9 +4,13 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

 #include <assert.h>
+#include <common/bl_common.h>
+#include <mce.h>
 #include <memctrl_v2.h>
+#include <tegra_mc_def.h>
+#include <tegra_platform.h>

 /*******************************************************************************
  * Array to hold stream_id override config register offsets
@@ -201,6 +205,318 @@ const static mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = {
	mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR),
};
static void tegra186_memctrl_reconfig_mss_clients(void)
{
#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
	uint32_t val, wdata_0, wdata_1;

	/*
	 * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
	 * boot and strongly ordered MSS clients to flush existing memory
	 * traffic and stall future requests.
	 */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);

	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
#if ENABLE_AFI_DEVICE
		  MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
#endif
		  MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
	} while ((val & wdata_0) != wdata_0);
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);

	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
		  MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);

	/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);

	/* Wait one more time due to SW WAR for known legacy issue */
	do {
		val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
	} while ((val & wdata_1) != wdata_1);
	/*
	 * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
	 * strongly ordered MSS clients. ROC needs to be single point
	 * of control on overriding the memory type. So, remove TSA's
	 * memtype override.
	 *
	 * MC clients with default SO_DEV override still enabled at TSA:
	 * AONW, BPMPW, SCEW, APEW
	 */
#if ENABLE_AFI_DEVICE
	mc_set_tsa_passthrough(AFIW);
#endif
	mc_set_tsa_passthrough(HDAW);
	mc_set_tsa_passthrough(SATAW);
	mc_set_tsa_passthrough(XUSB_HOSTW);
	mc_set_tsa_passthrough(XUSB_DEVW);
	mc_set_tsa_passthrough(SDMMCWAB);
	mc_set_tsa_passthrough(APEDMAW);
	mc_set_tsa_passthrough(SESWR);
	mc_set_tsa_passthrough(ETRW);
	mc_set_tsa_passthrough(AXISW);
	mc_set_tsa_passthrough(EQOSW);
	mc_set_tsa_passthrough(UFSHCW);
	mc_set_tsa_passthrough(BPMPDMAW);
	mc_set_tsa_passthrough(AONDMAW);
	mc_set_tsa_passthrough(SCEDMAW);
	/*
	 * Parker has no IO Coherency support and need the following:
	 * Ordered MC Clients on Parker are AFI, EQOS, SATA, XUSB.
	 * ISO clients(DISP, VI, EQOS) should never snoop caches and
	 * don't need ROC/PCFIFO ordering.
	 * ISO clients(EQOS) that need ordering should use PCFIFO ordering
	 * and bypass ROC ordering by using FORCE_NON_COHERENT path.
	 * FORCE_NON_COHERENT/FORCE_COHERENT config take precedence
	 * over SMMU attributes.
	 * Force all Normal memory transactions from ISO and non-ISO to be
	 * non-coherent(bypass ROC, avoid cache snoop to avoid perf hit).
	 * Force the SO_DEV transactions from ordered ISO clients(EQOS) to
	 * non-coherent path and enable MC PCFIFO interlock for ordering.
	 * Force the SO_DEV transactions from ordered non-ISO clients (PCIe,
	 * XUSB, SATA) to coherent so that the transactions are
	 * ordered by ROC.
	 * PCFIFO ensure write ordering.
	 * Read after Write ordering is maintained/enforced by MC clients.
	 * Clients that need PCIe type write ordering must
	 * go through ROC ordering.
	 * Ordering enable for Read clients is not necessary.
	 * R5's and A9 would get necessary ordering from AXI and
	 * don't need ROC ordering enable:
	 * - MMIO ordering is through dev mapping and MMIO
	 *   accesses bypass SMMU.
	 * - Normal memory is accessed through SMMU and ordering is
	 *   ensured by client and AXI.
	 * - Ack point for Normal memory is WCAM in MC.
	 * - MMIO's can be early acked and AXI ensures dev memory ordering,
	 *   Client ensures read/write direction change ordering.
	 * - See Bug 200312466 for more details.
	 *
	 * CGID_TAG_ADR is only present from T186 A02. As this code is common
	 * between A01 and A02, tegra_memctrl_set_overrides() programs
	 * CGID_TAG_ADR for the necessary clients on A02.
	 */
	mc_set_txn_override(HDAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(PTCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDISPLAYR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(EQOSW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVJPGSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ISPRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCWAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VICSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(MPCOREW, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
	mc_set_txn_override(GPUSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AXISR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SCEDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(EQOSR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/* See bug 200131110 comment #35 */
	mc_set_txn_override(APEDMAR, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVENCSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCRAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VICSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VIW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCRAA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AXISW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_DEVR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(UFSHCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(TSECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(GPUSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SATAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_HOSTW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(TSECSWRB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(GPUSRD2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SCEDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(GPUSWR2, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/* See bug 200131110 comment #35 */
	mc_set_txn_override(APEDMAW, CGID_TAG_CLIENT_AXI_ID, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(HOST1XDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ETRR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SESWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVJPGSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(TSECSRDB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPDMAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(APER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDECSRD1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_HOSTR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ISPWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SESRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SCER, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(MPCORER, CGID_TAG_DEFAULT, SO_DEV_ZERO, NO_OVERRIDE, NO_OVERRIDE);
	mc_set_txn_override(SDMMCWA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(HDAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDECSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(UFSHCW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(AONDMAR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SATAW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(ETRW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(VICSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVENCSWR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/* See bug 200131110 comment #35 */
	mc_set_txn_override(AFIR, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCWAB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCRA, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(NVDISPLAYR1, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(ISPWB, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(BPMPR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(APEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(SDMMCR, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	mc_set_txn_override(XUSB_DEVW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(TSECSRD, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/*
	 * See bug 200131110 comment #35 - there are no normal requests
	 * and AWID for SO/DEV requests is hardcoded in RTL for a
	 * particular PCIE controller
	 */
	mc_set_txn_override(AFIW, CGID_TAG_DEFAULT, SO_DEV_CLIENT_AXI_ID, FORCE_NON_COHERENT, FORCE_COHERENT);
	mc_set_txn_override(SCEW, CGID_TAG_DEFAULT, SO_DEV_ZERO, FORCE_NON_COHERENT, FORCE_NON_COHERENT);
	/*
	 * At this point, ordering can occur at ROC. So, remove PCFIFO's
	 * control over ordering requests.
	 *
	 * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
	 * boot and strongly ordered MSS clients
	 */
	val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
#if ENABLE_AFI_DEVICE
		mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
#endif
		mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
		mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);

	val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
		mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);

	val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);

	val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
		mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
		mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
	/* EQOSW is the only client that has PCFIFO order enabled. */
	val |= mc_set_pcfifo_ordered_boot_so_mss(4, EQOSW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);

	val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
		mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);
	/*
	 * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
	 * clients to allow memory traffic from all clients to start passing
	 * through ROC
	 */
	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
	assert(val == wdata_0);

	wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);

	val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
	assert(val == wdata_1);

	wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
	tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
#endif
}
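Each control register goes through the same assert/write/poll sequence, including the deliberate double poll. A condensed sketch of that flush-and-wait pattern as one helper (the helper itself is illustrative, not part of the commit; the accessors are the memctrl_v2 ones used above):

/* illustrative condensation of the HOTRESET flush sequence above */
static void mc_flush_and_wait(uint32_t ctrl_reg, uint32_t status_reg,
			      uint32_t flush_mask)
{
	uint32_t val;
	int32_t pass;

	/* assert FLUSH_ENABLE for the selected clients */
	tegra_mc_write_32(ctrl_reg, flush_mask);

	/* poll twice: the second pass is the SW WAR for the legacy issue */
	for (pass = 0; pass < 2; pass++) {
		do {
			val = tegra_mc_read_32(status_reg);
		} while ((val & flush_mask) != flush_mask);
	}
}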
static void tegra186_memctrl_set_overrides(void)
{
	const tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
	const mc_txn_override_cfg_t *mc_txn_override_cfgs;
	uint32_t num_txn_override_cfgs;
	uint32_t i, val;

	/* Get the settings from the platform */
	assert(plat_mc_settings != NULL);
	mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
	num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;

	/*
	 * Set the MC_TXN_OVERRIDE registers for write clients.
	 */
	if ((tegra_chipid_is_t186()) &&
	    (!tegra_platform_is_silicon() ||
	     (tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1U)))) {

		/*
		 * GPU and NVENC settings for Tegra186 simulation and
		 * Silicon rev. A01
		 */
		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
		val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
				  val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);

		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
		val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
				  val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);

		val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
		val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
		tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
				  val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);

	} else {

		/*
		 * Settings for Tegra186 silicon rev. A02 and onwards.
		 */
		for (i = 0; i < num_txn_override_cfgs; i++) {
			val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
			val &= (uint32_t)~MC_TXN_OVERRIDE_CGID_TAG_MASK;
			tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
					  val | mc_txn_override_cfgs[i].cgid_tag);
		}
	}
}
/*******************************************************************************
* Struct to hold the memory controller settings
******************************************************************************/
@@ -210,7 +526,9 @@ static tegra_mc_settings_t tegra186_mc_settings = {
	.streamid_security_cfg = tegra186_streamid_sec_cfgs,
	.num_streamid_security_cfgs = (uint32_t)ARRAY_SIZE(tegra186_streamid_sec_cfgs),
	.txn_override_cfg = tegra186_txn_override_cfgs,
-	.num_txn_override_cfgs = (uint32_t)ARRAY_SIZE(tegra186_txn_override_cfgs)
+	.num_txn_override_cfgs = (uint32_t)ARRAY_SIZE(tegra186_txn_override_cfgs),
+	.reconfig_mss_clients = tegra186_memctrl_reconfig_mss_clients,
+	.set_txn_overrides = tegra186_memctrl_set_overrides,
};
/*******************************************************************************
@@ -220,3 +538,45 @@ tegra_mc_settings_t *tegra_get_mc_settings(void)
{
	return &tegra186_mc_settings;
}

/*******************************************************************************
 * Handler to program the scratch registers with TZDRAM settings for the
 * resume firmware
 ******************************************************************************/
void plat_memctrl_tzdram_setup(uint64_t phys_base, uint64_t size_in_bytes)
{
	uint32_t val;

	/*
	 * Setup the Memory controller to allow only secure accesses to
	 * the TZDRAM carveout
	 */
	INFO("Configuring TrustZone DRAM Memory Carveout\n");

	tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
	tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
	tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);

	/*
	 * When TZ encryption is enabled, we need to setup TZDRAM
	 * before CPU accesses TZ Carveout, else CPU will fetch
	 * non-decrypted data. So save TZDRAM setting for SC7 resume
	 * FW to restore.
	 *
	 * Scratch registers map:
	 *  RSV55_0 = CFG1[12:0] | CFG0[31:20]
	 *  RSV55_1 = CFG3[1:0]
	 */
	val = tegra_mc_read_32(MC_SECURITY_CFG1_0) & MC_SECURITY_SIZE_MB_MASK;
	val |= tegra_mc_read_32(MC_SECURITY_CFG0_0) & MC_SECURITY_BOM_MASK;
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_LO, val);

	val = tegra_mc_read_32(MC_SECURITY_CFG3_0) & MC_SECURITY_BOM_HI_MASK;
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_TZDRAM_ADDR_HI, val);

	/*
	 * MCE propagates the security configuration values across the
	 * CCPLEX.
	 */
	(void)mce_update_gsc_tzdram();
}
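The scratch-register map packs the carveout size (CFG1[12:0], in MB) and base (CFG0[31:20]) into one word, which works because the two bit ranges do not overlap. A worked sketch of that packing for a hypothetical 2 MB carveout at 0xF0000000 (the mask values are assumed to match the comment's bit ranges):

#include <stdint.h>
#include <stdio.h>

#define MC_SECURITY_SIZE_MB_MASK	0x1FFFU		/* assumed: CFG1[12:0], size in MB */
#define MC_SECURITY_BOM_MASK		0xFFF00000U	/* assumed: CFG0[31:20], base address */

int main(void)
{
	uint64_t phys_base = 0xF0000000ULL;	/* hypothetical TZDRAM base */
	uint64_t size_in_bytes = 2U << 20;	/* 2 MB carveout */

	uint32_t cfg0 = (uint32_t)phys_base;			/* BOM register */
	uint32_t cfg1 = (uint32_t)(size_in_bytes >> 20);	/* size in MB */

	/* RSV55_0 = CFG1[12:0] | CFG0[31:20] */
	uint32_t rsv55_0 = (cfg1 & MC_SECURITY_SIZE_MB_MASK) |
			   (cfg0 & MC_SECURITY_BOM_MASK);

	printf("RSV55_0 = 0x%08x\n", rsv55_0);	/* prints 0xF0000002 */
	return 0;
}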
plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
@@ -22,15 +22,11 @@
 #include <smmu.h>
 #include <stdbool.h>
 #include <t18x_ari.h>
+#include <tegra186_private.h>
 #include <tegra_private.h>

-extern void memcpy16(void *dest, const void *src, unsigned int length);
-
-extern void prepare_cpu_pwr_dwn(void);
-extern void tegra186_cpu_reset_handler(void);
-extern uint64_t __tegra186_cpu_reset_handler_end,
-		__tegra186_smmu_context;

 /* state id mask */
 #define TEGRA186_STATE_ID_MASK		0xFU
 /* constants to get power state's wake time */
@@ -125,12 +121,11 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
	/* save 'Secure Boot' Processor Feature Config Register */
	val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
-	mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

	/* save SMMU context to TZDRAM */
-	smmu_ctx_base = params_from_bl2->tzdram_base +
-			((uintptr_t)&__tegra186_smmu_context -
-			 (uintptr_t)&tegra186_cpu_reset_handler);
+	smmu_ctx_base = params_from_bl2->tzdram_base +
+			tegra186_get_smmu_ctx_offset();
	tegra_smmu_save_context((uintptr_t)smmu_ctx_base);

	/* Prepare for system suspend */
@@ -139,6 +134,7 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
	cstate_info.system_state_force = 1;
	cstate_info.update_wake_mask = 1;
	mce_update_cstate_info(&cstate_info);

+	/* Loop until system suspend is allowed */
	do {
		val = (uint32_t)mce_command_handler(
@@ -151,6 +147,10 @@ int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7,
			MCE_CORE_SLEEP_TIME_INFINITE, 0U);

+		/* set system suspend state for house-keeping */
+		tegra186_set_system_suspend_entry();
+	} else {
+		; /* do nothing */
	}
@@ -281,8 +281,7 @@ int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_sta
	 * BL3-1 over to TZDRAM.
	 */
-	val = params_from_bl2->tzdram_base +
-	      ((uintptr_t)&__tegra186_cpu_reset_handler_end -
-	       (uintptr_t)&tegra186_cpu_reset_handler);
+	val = params_from_bl2->tzdram_base +
+	      tegra186_get_cpu_reset_handler_size();
	memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
		 (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
}
@@ -297,7 +296,7 @@ int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;

-	if (target_cluster > MPIDR_AFFLVL1) {
+	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {
		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;
plat/nvidia/tegra/soc/t186/plat_secondary.c
@@ -11,6 +11,7 @@
 #include <lib/mmio.h>
 #include <mce.h>
+#include <tegra186_private.h>
 #include <tegra_def.h>
 #include <tegra_private.h>
@@ -24,9 +25,6 @@
-extern void memcpy16(void *dest, const void *src, unsigned int length);
-
-extern uint64_t tegra_bl31_phys_base;
-extern uint64_t __tegra186_cpu_reset_handler_end;
/*******************************************************************************
* Setup secondary CPU vectors
******************************************************************************/
@@ -34,38 +32,33 @@ void plat_secondary_setup(void)
{
	uint32_t addr_low, addr_high;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
-	uint64_t cpu_reset_handler_base;
+	uint64_t cpu_reset_handler_base, cpu_reset_handler_size;

	INFO("Setting up secondary CPU boot\n");

-	if ((tegra_bl31_phys_base >= TEGRA_TZRAM_BASE) &&
-	    (tegra_bl31_phys_base <= (TEGRA_TZRAM_BASE + TEGRA_TZRAM_SIZE))) {
-		/*
-		 * The BL31 code resides in the TZSRAM which loses state
-		 * when we enter System Suspend. Copy the wakeup trampoline
-		 * code to TZDRAM to help us exit from System Suspend.
-		 */
-		cpu_reset_handler_base = params_from_bl2->tzdram_base;
-		memcpy16((void *)((uintptr_t)cpu_reset_handler_base),
-			 (void *)(uintptr_t)tegra186_cpu_reset_handler,
-			 (uintptr_t)&__tegra186_cpu_reset_handler_end -
-			 (uintptr_t)tegra186_cpu_reset_handler);
-	} else {
-		cpu_reset_handler_base = (uintptr_t)&tegra_secure_entrypoint;
-	}
+	/*
+	 * The BL31 code resides in the TZSRAM which loses state
+	 * when we enter System Suspend. Copy the wakeup trampoline
+	 * code to TZDRAM to help us exit from System Suspend.
+	 */
+	cpu_reset_handler_base = tegra186_get_cpu_reset_handler_base();
+	cpu_reset_handler_size = tegra186_get_cpu_reset_handler_size();
+	(void)memcpy16((void *)(uintptr_t)params_from_bl2->tzdram_base,
+			(const void *)(uintptr_t)cpu_reset_handler_base,
+			cpu_reset_handler_size);

-	addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
-	addr_high = (uint32_t)((cpu_reset_handler_base >> 32U) & 0x7ffU);
+	/* TZDRAM base will be used as the "resume" address */
+	addr_low = (uint32_t)params_from_bl2->tzdram_base | CPU_RESET_MODE_AA64;
+	addr_high = (uint32_t)((params_from_bl2->tzdram_base >> 32U) & 0x7ffU);

	/* write lower 32 bits first, then the upper 11 bits */
	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, addr_high);

	/* save reset vector to be used during SYSTEM_SUSPEND exit */
-	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_0, addr_low);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_LO, addr_low);
-	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_1, addr_high);
+	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_RESET_VECTOR_HI, addr_high);
/* update reset vector address to the CCPLEX */
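The reset vector is programmed as a full 32-bit low word (with the AArch64 mode flag OR'd in) plus an 11-bit high word; the 0x7ff mask keeps bits [42:32] of the physical address. A small sketch of the split for an illustrative TZDRAM base (the CPU_RESET_MODE_AA64 value here is an assumption for the example):

#include <stdint.h>
#include <stdio.h>

#define CPU_RESET_MODE_AA64	1U	/* assumed flag value, for illustration only */

int main(void)
{
	uint64_t tzdram_base = 0x2F0000000ULL;	/* hypothetical 34-bit address */

	uint32_t addr_low  = (uint32_t)tzdram_base | CPU_RESET_MODE_AA64;
	uint32_t addr_high = (uint32_t)((tzdram_base >> 32U) & 0x7ffU);

	/* low word carries bits [31:0] plus the mode flag; high word bits [42:32] */
	printf("RST_LOW=0x%08x RST_HIGH=0x%03x\n", addr_low, addr_high);
	return 0;
}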
plat/nvidia/tegra/soc/t186/plat_setup.c
@@ -40,7 +40,7 @@
 * the number of power domains at the highest power level.
 *******************************************************************************
 */
-const uint8_t tegra_power_domain_tree_desc[] = {
+static const uint8_t tegra_power_domain_tree_desc[] = {
	/* No of root nodes */
	1,
	/* No of clusters */
@@ -211,7 +211,7 @@ struct tegra_bl31_params *plat_get_bl31_params(void)
{
	uint32_t val;

-	val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_LO);
+	val = mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PARAMS_ADDR);

	return (struct tegra_bl31_params *)(uintptr_t)val;
}
@@ -223,7 +223,7 @@ plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
{
	uint32_t val;

-	val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_HI);
+	val = mmio_read_32(TEGRA_SCRATCH_BASE + SCRATCH_BL31_PLAT_PARAMS_ADDR);

	return (plat_params_from_bl2_t *)(uintptr_t)val;
}
plat/nvidia/tegra/soc/t186/plat_smmu.c
@@ -8,6 +8,7 @@
 #include <smmu.h>
 #include <tegra_def.h>
+#include <tegra_mc_def.h>

 #define MAX_NUM_SMMU_DEVICES	U(1)
plat/nvidia/tegra/soc/t186/plat_trampoline.S
@@ -10,23 +10,32 @@
#include <plat/common/common_def.h>
#include <tegra_def.h>

#define TEGRA186_STATE_SYSTEM_SUSPEND	0x5C7
#define TEGRA186_STATE_SYSTEM_RESUME	0x600D
#define TEGRA186_SMMU_CTX_SIZE		0x420

	.globl	tegra186_cpu_reset_handler

	/* CPU reset handler routine */
func tegra186_cpu_reset_handler _align=4
	/*
	 * The TZRAM loses state during System Suspend. We use this
	 * information to decide if the reset handler is running after a
	 * System Suspend. Resume from system suspend requires restoring
	 * the entire state from TZDRAM to TZRAM.
	 */
	mov	x0, #BL31_BASE
	ldr	x0, [x0]
	cbnz	x0, boot_cpu

	/* check if we are exiting system suspend state */
	adr	x0, __tegra186_system_suspend_state
	ldr	x1, [x0]
	mov	x2, #TEGRA186_STATE_SYSTEM_SUSPEND
	lsl	x2, x2, #16
	add	x2, x2, #TEGRA186_STATE_SYSTEM_SUSPEND
	cmp	x1, x2
	bne	boot_cpu

	/* resume from system suspend: set system resume state */
	mov	x1, #TEGRA186_STATE_SYSTEM_RESUME
	lsl	x1, x1, #16
	mov	x2, #TEGRA186_STATE_SYSTEM_RESUME
	add	x1, x1, x2
	str	x1, [x0]
	dsb	sy

	/* prepare to relocate to TZSRAM */
	mov	x0, #BL31_BASE
	adr	x1, __tegra186_cpu_reset_handler_end
	adr	x2, __tegra186_cpu_reset_handler_data
@@ -69,6 +78,12 @@ endfunc tegra186_cpu_reset_handler
__tegra186_cpu_reset_handler_data:
	.quad	tegra_secure_entrypoint
	.quad	__BL31_END__ - BL31_BASE

+	.globl	__tegra186_system_suspend_state
+__tegra186_system_suspend_state:
+	.quad	0
+
+	.align 4
	.globl	__tegra186_smmu_context
__tegra186_smmu_context:
	.rept	TEGRA186_SMMU_CTX_SIZE
@@ -80,3 +95,50 @@ __tegra186_smmu_context:
	.align 4
	.globl	__tegra186_cpu_reset_handler_end
__tegra186_cpu_reset_handler_end:

	.globl	tegra186_get_cpu_reset_handler_size
	.globl	tegra186_get_cpu_reset_handler_base
	.globl	tegra186_get_smmu_ctx_offset
	.globl	tegra186_set_system_suspend_entry

	/* return size of the CPU reset handler */
func tegra186_get_cpu_reset_handler_size
	adr	x0, __tegra186_cpu_reset_handler_end
	adr	x1, tegra186_cpu_reset_handler
	sub	x0, x0, x1
	ret
endfunc tegra186_get_cpu_reset_handler_size

	/* return the start address of the CPU reset handler */
func tegra186_get_cpu_reset_handler_base
	adr	x0, tegra186_cpu_reset_handler
	ret
endfunc tegra186_get_cpu_reset_handler_base

	/* return the size of the SMMU context */
func tegra186_get_smmu_ctx_offset
	adr	x0, __tegra186_smmu_context
	adr	x1, tegra186_cpu_reset_handler
	sub	x0, x0, x1
	ret
endfunc tegra186_get_smmu_ctx_offset

	/* set system suspend state before SC7 entry */
func tegra186_set_system_suspend_entry
	mov	x0, #TEGRA_MC_BASE
	mov	x3, #MC_SECURITY_CFG3_0
	ldr	w1, [x0, x3]
	lsl	x1, x1, #32
	mov	x3, #MC_SECURITY_CFG0_0
	ldr	w2, [x0, x3]
	orr	x3, x1, x2			/* TZDRAM base */
	adr	x0, __tegra186_system_suspend_state
	adr	x1, tegra186_cpu_reset_handler
	sub	x2, x0, x1			/* offset in TZDRAM */
	mov	x0, #TEGRA186_STATE_SYSTEM_SUSPEND
	lsl	x0, x0, #16
	add	x0, x0, #TEGRA186_STATE_SYSTEM_SUSPEND
	str	x0, [x3, x2]			/* set value in TZDRAM */
	dsb	sy
	ret
endfunc tegra186_set_system_suspend_entry
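The trampoline builds its suspend/resume markers by replicating a 16-bit magic into both halves of a 32-bit word (the `lsl #16` followed by `add` idiom above). An equivalent C sketch of the encoding and the values it produces (the magics come from the defines above):

#include <assert.h>
#include <stdint.h>

#define TEGRA186_STATE_SYSTEM_SUSPEND	0x5C7U
#define TEGRA186_STATE_SYSTEM_RESUME	0x600DU

/* mirrors: mov x2, #STATE; lsl x2, x2, #16; add x2, x2, #STATE */
static inline uint32_t tegra186_state_word(uint32_t magic)
{
	return (magic << 16) + magic;
}

int main(void)
{
	/* the reset handler resumes only when both halves match the magic */
	assert(tegra186_state_word(TEGRA186_STATE_SYSTEM_SUSPEND) == 0x05C705C7U);
	assert(tegra186_state_word(TEGRA186_STATE_SYSTEM_RESUME) == 0x600D600DU);
	return 0;
}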
plat/nvidia/tegra/soc/t186/platform_t186.mk
@@ -11,15 +11,9 @@ $(eval $(call add_define,ENABLE_AFI_DEVICE))
 ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS	:= 1
 $(eval $(call add_define,ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS))

-RELOCATE_TO_BL31_BASE			:= 1
-$(eval $(call add_define,RELOCATE_TO_BL31_BASE))

 ENABLE_CHIP_VERIFICATION_HARNESS	:= 0
 $(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS))

-ENABLE_SMMU_DEVICE			:= 1
-$(eval $(call add_define,ENABLE_SMMU_DEVICE))

 RESET_TO_BL31				:= 1
 PROGRAMMABLE_RESET_ADDRESS		:= 1
@@ -45,7 +39,8 @@ $(eval $(call add_define,MAX_MMAP_REGIONS))
# platform files
PLAT_INCLUDES		+=	-I${SOC_DIR}/drivers/include

-BL31_SOURCES		+=	lib/cpus/aarch64/denver.S		\
+BL31_SOURCES		+=	drivers/ti/uart/aarch64/16550_console.S	\
+				lib/cpus/aarch64/denver.S		\
				lib/cpus/aarch64/cortex_a57.S		\
				${COMMON_DIR}/drivers/gpcdma/gpcdma.c	\
				${COMMON_DIR}/drivers/memctrl/memctrl_v2.c	\
plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
@@ -104,7 +104,12 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
	if ((lvl == MPIDR_AFFLVL1) && (target == PSTATE_ID_CLUSTER_IDLE)) {

		/* initialize the bpmp interface */
-		(void)tegra_bpmp_init();
+		ret = tegra_bpmp_init();
+		if (ret != 0U) {
+			/* Cluster idle not allowed */
+			target = PSCI_LOCAL_STATE_RUN;
+		} else {
+			/* Cluster idle */
+			data[0] = (uint32_t)cpu;
@@ -112,7 +117,8 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
			data[2] = TEGRA_PM_SC1;

			ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
					(void *)&data, (int)sizeof(data),
					(void *)&bpmp_reply, (int)sizeof(bpmp_reply));

			/* check if cluster idle entry is allowed */
			if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
@@ -120,11 +126,17 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
				/* Cluster idle not allowed */
				target = PSCI_LOCAL_STATE_RUN;
			}
		}
	} else if ((lvl == MPIDR_AFFLVL1) &&
		   (target == PSTATE_ID_CLUSTER_POWERDN)) {

		/* initialize the bpmp interface */
-		(void)tegra_bpmp_init();
+		ret = tegra_bpmp_init();
+		if (ret != 0U) {
+			/* Cluster power down not allowed */
+			target = PSCI_LOCAL_STATE_RUN;
+		} else {
+			/* Cluster power-down */
+			data[0] = (uint32_t)cpu;
@@ -132,7 +144,8 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
			data[2] = TEGRA_PM_SC1;

			ret = tegra_bpmp_send_receive_atomic(MRQ_DO_IDLE,
					(void *)&data, (int)sizeof(data),
					(void *)&bpmp_reply, (int)sizeof(bpmp_reply));

			/* check if cluster power down is allowed */
			if ((ret != 0L) || (bpmp_reply != BPMP_CCx_ALLOWED)) {
@@ -140,6 +153,7 @@ plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
				/* Cluster power down not allowed */
				target = PSCI_LOCAL_STATE_RUN;
			}
		}
	} else if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
		   (target == PSTATE_ID_SOC_POWERDN)) {
plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -24,7 +24,8 @@ $(eval $(call add_define,MAX_MMAP_REGIONS))
PLAT_INCLUDES		+=	-I${SOC_DIR}/drivers/se

-BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a53.S		\
+BL31_SOURCES		+=	drivers/ti/uart/aarch64/16550_console.S	\
+				lib/cpus/aarch64/cortex_a53.S		\
				lib/cpus/aarch64/cortex_a57.S		\
				${COMMON_DIR}/drivers/bpmp/bpmp.c	\
				${COMMON_DIR}/drivers/flowctrl/flowctrl.c	\
services/spd/trusty/smcall.h
@@ -7,69 +7,68 @@
#ifndef SMCALL_H
#define SMCALL_H

-#define SMC_NUM_ENTITIES	64
-#define SMC_NUM_ARGS		4
-#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1)
+#define SMC_NUM_ENTITIES	64U
+#define SMC_NUM_ARGS		4U
+#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1U)

-#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000)
-#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000)
-#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000) >> 24)
-#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFF)
+#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000U)
+#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000U)
+#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000U) >> 24U)
+#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFFU)

-#define SMC_NR(entity, fn, fastcall, smc64) \
-		(((((unsigned int)(fastcall)) & 0x1) << 31) | \
-		(((smc64) & 0x1) << 30) | \
-		(((entity) & 0x3F) << 24) | \
-		((fn) & 0xFFFF) \
-		)
+#define SMC_NR(entity, fn, fastcall, smc64) \
+		(((((uint32_t)(fastcall)) & 0x1U) << 31U) | \
+		 (((smc64) & 0x1U) << 30U) | \
+		 (((entity) & 0x3FU) << 24U) | \
+		 ((fn) & 0xFFFFU))

-#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1, 0)
-#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1, 1)
-#define SMC_YIELDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0, 0)
-#define SMC_YIELDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0, 1)
+#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1U, 0U)
+#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1U, 1U)
+#define SMC_YIELDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0U, 0U)
+#define SMC_YIELDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0U, 1U)

-#define SMC_ENTITY_ARCH			0	/* ARM Architecture calls */
-#define SMC_ENTITY_CPU			1	/* CPU Service calls */
-#define SMC_ENTITY_SIP			2	/* SIP Service calls */
-#define SMC_ENTITY_OEM			3	/* OEM Service calls */
-#define SMC_ENTITY_STD			4	/* Standard Service calls */
-#define SMC_ENTITY_RESERVED		5	/* Reserved for future use */
-#define SMC_ENTITY_TRUSTED_APP		48	/* Trusted Application calls */
-#define SMC_ENTITY_TRUSTED_OS		50	/* Trusted OS calls */
-#define SMC_ENTITY_LOGGING		51	/* Used for secure -> nonsecure logging */
-#define SMC_ENTITY_SECURE_MONITOR	60	/* Trusted OS calls internal to secure monitor */
+#define SMC_ENTITY_ARCH			0U	/* ARM Architecture calls */
+#define SMC_ENTITY_CPU			1U	/* CPU Service calls */
+#define SMC_ENTITY_SIP			2U	/* SIP Service calls */
+#define SMC_ENTITY_OEM			3U	/* OEM Service calls */
+#define SMC_ENTITY_STD			4U	/* Standard Service calls */
+#define SMC_ENTITY_RESERVED		5U	/* Reserved for future use */
+#define SMC_ENTITY_TRUSTED_APP		48U	/* Trusted Application calls */
+#define SMC_ENTITY_TRUSTED_OS		50U	/* Trusted OS calls */
+#define SMC_ENTITY_LOGGING		51U	/* Used for secure -> nonsecure logging */
+#define SMC_ENTITY_SECURE_MONITOR	60U	/* Trusted OS calls internal to secure monitor */

 /* FC = Fast call, YC = Yielding call */
-#define SMC_YC_RESTART_LAST	SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
-#define SMC_YC_NOP		SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_YC_RESTART_LAST	SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0U)
+#define SMC_YC_NOP		SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1U)

 /*
  * Return from secure os to non-secure os with return value in r1
  */
-#define SMC_YC_NS_RETURN	SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_YC_NS_RETURN	SMC_YIELDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0U)

-#define SMC_FC_RESERVED		SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
-#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
-#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
-#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
-#define SMC_FC_FIQ_ENTER	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 4)
+#define SMC_FC_RESERVED		SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0U)
+#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1U)
+#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2U)
+#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3U)
+#define SMC_FC_FIQ_ENTER	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 4U)

-#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
-#define SMC_FC64_GET_FIQ_REGS	SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 6)
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5U)
+#define SMC_FC64_GET_FIQ_REGS	SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 6U)

-#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7)
-#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8)
+#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7U)
+#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8U)

-#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9)
-#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10)
+#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9U)
+#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10U)

 /* Trusted OS entity calls */
-#define SMC_YC_VIRTIO_GET_DESCR	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
-#define SMC_YC_VIRTIO_START	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
-#define SMC_YC_VIRTIO_STOP	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+#define SMC_YC_VIRTIO_GET_DESCR	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20U)
+#define SMC_YC_VIRTIO_START	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21U)
+#define SMC_YC_VIRTIO_STOP	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22U)

-#define SMC_YC_VDEV_RESET	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
-#define SMC_YC_VDEV_KICK_VQ	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
-#define SMC_YC_SET_ROT_PARAMS	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535)
+#define SMC_YC_VDEV_RESET	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23U)
+#define SMC_YC_VDEV_KICK_VQ	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24U)
+#define SMC_YC_SET_ROT_PARAMS	SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535U)

#endif /* SMCALL_H */
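SMC_NR packs a function ID as fastcall-bit [31] | smc64-bit [30] | entity [29:24] | function [15:0]; the change here only adds unsigned suffixes so the shifts are well defined. A quick worked example of encoding SMC_FC_CPU_SUSPEND and decoding it back (values follow from the macros above):

#include <assert.h>
#include <stdint.h>

/* SMC_FC_CPU_SUSPEND = SMC_NR(entity=60, fn=7, fastcall=1, smc64=0) */
int main(void)
{
	uint32_t nr = (1U << 31) | (0U << 30) | (60U << 24) | 7U;

	assert(nr == 0xBC000007U);			/* fast call, entity 60, fn 7 */
	assert((nr & 0x80000000U) != 0U);		/* SMC_IS_FASTCALL */
	assert(((nr & 0x3F000000U) >> 24U) == 60U);	/* SMC_ENTITY */
	assert((nr & 0x0000FFFFU) == 7U);		/* SMC_FUNCTION */
	return 0;
}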
services/spd/trusty/trusty.c
@@ -21,7 +21,10 @@
#include "smcall.h"
/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
#define HYP_ENABLE_FLAG 0x286001
#define HYP_ENABLE_FLAG 0x286001U
/* length of Trusty's input parameters (in bytes) */
#define TRUSTY_PARAMS_LEN_BYTES (4096U * 2)
struct
trusty_stack
{
uint8_t
space
[
PLATFORM_STACK_SIZE
]
__aligned
(
16
);
...
...
@@ -32,7 +35,7 @@ struct trusty_cpu_ctx {
	cpu_context_t	cpu_ctx;
	void		*saved_sp;
	uint32_t	saved_security_state;
-	int		fiq_handler_active;
+	int32_t		fiq_handler_active;
	uint64_t	fiq_handler_pc;
	uint64_t	fiq_handler_cpsr;
	uint64_t	fiq_handler_sp;
@@ -43,7 +46,7 @@ struct trusty_cpu_ctx {
	struct trusty_stack	secure_stack;
};

-struct args {
+struct smc_args {
	uint64_t	r0;
	uint64_t	r1;
	uint64_t	r2;
@@ -56,8 +59,8 @@ struct args {
static struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];

-struct args trusty_init_context_stack(void **sp, void *new_stack);
-struct args trusty_context_switch_helper(void **sp, void *smc_params);
+struct smc_args trusty_init_context_stack(void **sp, void *new_stack);
+struct smc_args trusty_context_switch_helper(void **sp, void *smc_params);

static uint32_t current_vmid;
@@ -66,37 +69,37 @@ static struct trusty_cpu_ctx *get_trusty_ctx(void)
	return &trusty_cpu_ctx[plat_my_core_pos()];
}

-static uint32_t is_hypervisor_mode(void)
+static bool is_hypervisor_mode(void)
{
	uint64_t hcr = read_hcr();

-	return !!(hcr & HYP_ENABLE_FLAG);
+	return ((hcr & HYP_ENABLE_FLAG) != 0U) ? true : false;
}

-static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
+static struct smc_args trusty_context_switch(uint32_t security_state, uint64_t r0,
					 uint64_t r1, uint64_t r2, uint64_t r3)
{
-	struct args ret;
+	struct smc_args args, ret_args;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	struct trusty_cpu_ctx *ctx_smc;

	assert(ctx->saved_security_state != security_state);

-	ret.r7 = 0;
+	args.r7 = 0;
	if (is_hypervisor_mode()) {
		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
		ctx_smc = cm_get_context(NON_SECURE);
-		assert(ctx_smc);
-		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
+		assert(ctx_smc != NULL);
+		args.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
	}
	/* r4, r5, r6 reserved for future use. */
-	ret.r6 = 0;
-	ret.r5 = 0;
-	ret.r4 = 0;
-	ret.r3 = r3;
-	ret.r2 = r2;
-	ret.r1 = r1;
-	ret.r0 = r0;
+	args.r6 = 0;
+	args.r5 = 0;
+	args.r4 = 0;
+	args.r3 = r3;
+	args.r2 = r2;
+	args.r1 = r1;
+	args.r0 = r0;

	/*
	 * To avoid the additional overhead in PSCI flow, skip FP context
@@ -109,9 +112,9 @@ static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
	cm_el1_sysregs_context_save(security_state);

	ctx->saved_security_state = security_state;
-	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);
+	ret_args = trusty_context_switch_helper(&ctx->saved_sp, &args);

-	assert(ctx->saved_security_state == !security_state);
+	assert(ctx->saved_security_state == ((security_state == 0U) ? 1U : 0U));

	cm_el1_sysregs_context_restore(security_state);
	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
@@ -119,7 +122,7 @@ static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
	cm_set_next_eret_context(security_state);

-	return ret;
+	return ret_args;
}
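trusty_context_switch() marshals the SMC register file through the renamed struct smc_args, with x7 reserved for the VMID per ARM DEN0028A when a hypervisor is present. A tiny sketch of that marshalling (the struct layout mirrors the one above; the builder function is illustrative):

#include <stdint.h>

struct smc_args {
	uint64_t r0;
	uint64_t r1;
	uint64_t r2;
	uint64_t r3;
	uint64_t r4;	/* r4-r6 reserved for future use */
	uint64_t r5;
	uint64_t r6;
	uint64_t r7;	/* VMID (ARM DEN0028A) when a hypervisor is active, else 0 */
};

/* illustrative: assemble the block handed to trusty_context_switch_helper() */
static struct smc_args smc_args_make(uint64_t r0, uint64_t r1, uint64_t r2,
				     uint64_t r3, uint64_t vmid)
{
	struct smc_args args = {0};

	args.r0 = r0;
	args.r1 = r1;
	args.r2 = r2;
	args.r3 = r3;
	args.r7 = vmid;
	return args;
}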
static uint64_t trusty_fiq_handler(uint32_t id,
@@ -127,29 +130,29 @@ static uint64_t trusty_fiq_handler(uint32_t id,
				   void *handle, void *cookie)
{
-	struct args ret;
+	struct smc_args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	assert(!is_caller_secure(flags));

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
-	if (ret.r0) {
+	if (ret.r0 != 0U) {
		SMC_RET0(handle);
	}

-	if (ctx->fiq_handler_active) {
+	if (ctx->fiq_handler_active != 0) {
		INFO("%s: fiq handler already active\n", __func__);
		SMC_RET0(handle);
	}

	ctx->fiq_handler_active = 1;
-	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
+	(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);

	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
-	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc,
+			    (uint32_t)ctx->fiq_handler_cpsr);

	SMC_RET0(handle);
}
@@ -159,9 +162,9 @@ static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
{
	struct trusty_cpu_ctx *ctx;

-	if (cpu >= PLATFORM_CORE_COUNT) {
+	if (cpu >= (uint64_t)PLATFORM_CORE_COUNT) {
		ERROR("%s: cpu %lld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
-		return SM_ERR_INVALID_PARAMETERS;
+		return (uint64_t)SM_ERR_INVALID_PARAMETERS;
	}

	ctx = &trusty_cpu_ctx[cpu];
@@ -182,16 +185,16 @@ static uint64_t trusty_get_fiq_regs(void *handle)
static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
{
-	struct args ret;
+	struct smc_args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

-	if (!ctx->fiq_handler_active) {
+	if (ctx->fiq_handler_active == 0) {
		NOTICE("%s: fiq handler not active\n", __func__);
-		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
+		SMC_RET1(handle, (uint64_t)SM_ERR_INVALID_PARAMETERS);
	}

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
-	if (ret.r0 != 1) {
+	if (ret.r0 != 1U) {
		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %lld\n",
		     __func__, handle, ret.r0);
	}
@@ -205,10 +208,10 @@ static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t
	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
	 * corrupts them (el1 code also restored them).
	 */
-	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
+	(void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
	ctx->fiq_handler_active = 0;
	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
-	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);

	SMC_RET0(handle);
}
@@ -222,8 +225,8 @@ static uintptr_t trusty_smc_handler(uint32_t smc_fid,
				    void *handle,
				    u_register_t flags)
{
-	struct args ret;
-	uint32_t vmid = 0;
+	struct smc_args ret;
+	uint32_t vmid = 0U;
	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);

	/*
@@ -231,10 +234,12 @@ static uintptr_t trusty_smc_handler(uint32_t smc_fid,
	 * Verified Boot is not even supported and returning success here
	 * would not compromise the boot process.
	 */
-	if (!ep_info && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
+	if ((ep_info == NULL) && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
		SMC_RET1(handle, 0);
-	} else if (!ep_info) {
+	} else if (ep_info == NULL) {
		SMC_RET1(handle, SMC_UNK);
+	} else {
+		; /* do nothing */
	}

	if (is_caller_secure(flags)) {
@@ -279,12 +284,11 @@ static uintptr_t trusty_smc_handler(uint32_t smc_fid,
static int32_t trusty_init(void)
{
-	void el3_exit(void);
	entry_point_info_t *ep_info;
-	struct args zero_args = {0};
+	struct smc_args zero_args = {0};
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint32_t cpu = plat_my_core_pos();
-	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
+	uint64_t reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
			       CTX_SPSR_EL3));

	/*
@@ -292,7 +296,7 @@ static int32_t trusty_init(void)
	 * failure.
	 */
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
-	assert(ep_info);
+	assert(ep_info != NULL);

	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
	cm_el1_sysregs_context_save(NON_SECURE);
@@ -304,7 +308,7 @@ static int32_t trusty_init(void)
	 * Adjust secondary cpu entry point for 32 bit images to the
	 * end of exception vectors
	 */
-	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
+	if ((cpu != 0U) && (reg_width == MODE_RW_32)) {
		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
		     cpu, ep_info->pc + (1U << 5));
		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
@@ -314,10 +318,10 @@ static int32_t trusty_init(void)
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
	cm_set_next_eret_context(SECURE);

-	ctx->saved_security_state = ~0;	/* initial saved state is invalid */
-	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
+	ctx->saved_security_state = ~0U;	/* initial saved state is invalid */
+	(void)trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);

-	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
+	(void)trusty_context_switch_helper(&ctx->saved_sp, &zero_args);

	cm_el1_sysregs_context_restore(NON_SECURE);
	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
@@ -328,10 +332,10 @@ static int32_t trusty_init(void)
static void trusty_cpu_suspend(uint32_t off)
{
-	struct args ret;
+	struct smc_args ret;

	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
-	if (ret.r0 != 0) {
+	if (ret.r0 != 0U) {
		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %lld\n",
		     __func__, plat_my_core_pos(), ret.r0);
	}
@@ -339,10 +343,10 @@ static void trusty_cpu_suspend(uint32_t off)
static void trusty_cpu_resume(uint32_t on)
{
-	struct args ret;
+	struct smc_args ret;

	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
-	if (ret.r0 != 0) {
+	if (ret.r0 != 0U) {
		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %lld\n",
		     __func__, plat_my_core_pos(), ret.r0);
	}
@@ -359,8 +363,8 @@ static void trusty_cpu_on_finish_handler(u_register_t unused)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

-	if (!ctx->saved_sp) {
-		trusty_init();
+	if (ctx->saved_sp == NULL) {
+		(void)trusty_init();
	} else {
		trusty_cpu_resume(1);
	}
entry_point_info_t
*
ep_info
;
uint32_t
instr
;
uint32_t
flags
;
int
ret
;
int
32_t
ret
;
bool
aarch32
=
false
;
/* Get trusty's entry point info */
ep_info
=
bl31_plat_get_next_image_ep_info
(
SECURE
);
if
(
!
ep_info
)
{
if
(
ep_info
==
NULL
)
{
INFO
(
"Trusty image missing.
\n
"
);
return
-
1
;
}
...
...
@@ -444,8 +448,9 @@ static int32_t trusty_setup(void)
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      trusty_fiq_handler, flags);
-	if (ret)
+	if (ret != 0) {
		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
+	}

	if (aarch32) {
		entry_point_info_t *ns_ep_info;