Commit 84ded36c — authored Jul 18, 2016 by danh-arm, committed by GitHub on Jul 18, 2016

Merge pull request #654 from rockchip-linux/rk3399-suspend-resume

rockchip: support the suspend/resume for rk3399

Parents: 473cae6c, 9ec78bdf
Changes: 16 files
plat/rockchip/common/aarch64/plat_helpers.S
...
...
@@ -35,6 +35,7 @@
 #include <cortex_a72.h>
 #include <plat_private.h>
 #include <platform_def.h>
+#include <plat_pmu_macros.S>

	.globl	cpuson_entry_point
	.globl	cpuson_flags
...
...
@@ -47,92 +48,38 @@
 	.globl	plat_my_core_pos
 	.globl	plat_reset_handler

-#define RK_REVISION(rev) RK_PLAT_CFG##rev
-#define RK_HANDLER(rev) plat_reset_handler_juno_r##rev
-#define JUMP_TO_HANDLER_IF_RK_R(revision)	\
-	jump_to_handler RK_REVISION(revision), RK_HANDLER(revision)
-
-	/*
-	 * Helper macro to jump to the given handler if the board revision
-	 * matches.
-	 * Expects the Juno board revision in x0.
-	 */
-	.macro jump_to_handler _revision, _handler
-	cmp	x0, #\_revision
-	b.eq	\_handler
-	.endm
-
-	/*
-	 * Helper macro that reads the part number of the current CPU and jumps
-	 * to the given label if it matches the CPU MIDR provided.
-	 */
-	.macro jump_if_cpu_midr _cpu_midr, _label
-	mrs	x0, midr_el1
-	ubfx	x0, x0, MIDR_PN_SHIFT, #12
-	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
-	b.eq	\_label
-	.endm
-
-	/*
-	 * Platform reset handler for rockchip.
-	 * only A53 cores
-	 */
-func RK_HANDLER(0)
-	ret
-endfunc RK_HANDLER(0)
-
-	/*
-	 * Platform reset handler for rockchip.
-	 * - Cortex-A53 processor cluster;
-	 * - Cortex-A72 processor cluster.
-	 *
-	 * This handler does the following:
-	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
-	 * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
-	 */
-func RK_HANDLER(1)
-	/*
-	 * Nothing to do on Cortex-A53.
-	 */
-	jump_if_cpu_midr CORTEX_A72_MIDR, A72
-	ret
-
-A72:
-	/* Cortex-A72 specific settings */
-	mov	x0, #((2 << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
-		(0x1 << 5))
-	msr	L2CTLR_EL1, x0
-	isb
-	ret
-endfunc RK_HANDLER(1)
-
-	/*
-	 * void plat_reset_handler(void);
-	 *
-	 * Determine the SOC type and call the appropriate reset
-	 * handler.
-	 */
-func plat_reset_handler
-	mov	x0, RK_PLAT_AARCH_CFG
-	JUMP_TO_HANDLER_IF_RK_R(0)
-	JUMP_TO_HANDLER_IF_RK_R(1)
-
-	/* SOC type is not supported */
-not_supported:
-	b	not_supported
-endfunc plat_reset_handler
+	/*
+	 * void plat_reset_handler(void);
+	 *
+	 * Determine the SOC type and call the appropriate reset
+	 * handler.
+	 */
+func plat_reset_handler
+	mrs	x0, midr_el1
+	ubfx	x0, x0, MIDR_PN_SHIFT, #12
+	cmp	w0, #((CORTEX_A72_MIDR >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+	b.eq	handler_a72
+	b	handler_end
+handler_a72:
+	/*
+	 * Set the L2 Data RAM latency for Cortex-A72.
+	 * Set the L2 Tag RAM latency for Cortex-A72.
+	 */
+	mov	x0, #((2 << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
+		(0x1 << 5))
+	msr	L2CTLR_EL1, x0
+	isb
+handler_end:
+	ret
+endfunc plat_reset_handler

 func plat_my_core_pos
 	mrs	x0, mpidr_el1
 	and	x1, x0, #MPIDR_CPU_MASK
 	and	x0, x0, #MPIDR_CLUSTER_MASK
-	add	x0, x1, x0, LSR #6
+	add	x0, x1, x0, LSR #PLAT_RK_CLST_TO_CPUID_SHIFT
 	ret
 endfunc plat_my_core_pos
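The new reset handler keys off the CPU part number read from MIDR_EL1 instead of a per-board revision define. A minimal C sketch of the same check, with the constants copied from the TF headers and assumed here only so the snippet stands alone:

#include <stdint.h>

#define MIDR_PN_SHIFT	4U
#define MIDR_PN_MASK	0xfffU
#define CORTEX_A72_MIDR	0x410FD080U	/* assumed value; the real one comes from cortex_a72.h */

/* Returns 1 when the given MIDR reports the Cortex-A72 part number. */
static int is_cortex_a72(uint32_t midr)
{
	return ((midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK) ==
	       ((CORTEX_A72_MIDR >> MIDR_PN_SHIFT) & MIDR_PN_MASK);
}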
...
...
@@ -192,30 +139,30 @@ endfunc plat_crash_console_putc
 	.align	16
 func platform_cpu_warmboot
 	mrs	x0, MPIDR_EL1
-	and	x1, x0, #MPIDR_CPU_MASK
-	and	x0, x0, #MPIDR_CLUSTER_MASK
+	and	x19, x0, #MPIDR_CPU_MASK
+	and	x20, x0, #MPIDR_CLUSTER_MASK
+	mov	x0, x20
+	func_rockchip_clst_warmboot
 	/* --------------------------------------------------------------------
 	 * big cluster id is 1
 	 * big cores id is from 0-3, little cores id 4-7
 	 * --------------------------------------------------------------------
 	 */
-	add	x0, x1, x0, lsr #6
+	add	x21, x19, x20, lsr #PLAT_RK_CLST_TO_CPUID_SHIFT
 	/* --------------------------------------------------------------------
 	 * get per cpuup flag
 	 * --------------------------------------------------------------------
 	 */
 	adr	x4, cpuson_flags
-	add	x4, x4, x0, lsl #2
+	add	x4, x4, x21, lsl #2
 	ldr	w1, [x4]
 	/* --------------------------------------------------------------------
 	 * check cpuon reason
 	 * --------------------------------------------------------------------
 	 */
-	ldr	w3, =PMU_CPU_AUTO_PWRDN
-	cmp	w1, w3
+	cmp	w1, PMU_CPU_AUTO_PWRDN
 	b.eq	boot_entry
-	ldr	w3, =PMU_CPU_HOTPLUG
-	cmp	w1, w3
+	cmp	w1, PMU_CPU_HOTPLUG
 	b.eq	boot_entry
 	/* --------------------------------------------------------------------
 	 * If the boot core cpuson_flags or cpuson_entry_point is not
...
...
@@ -226,15 +173,13 @@ wfe_loop:
 	wfe
 	b	wfe_loop
 boot_entry:
-	mov	w1, #0
-	str	w1, [x4]
+	str	wzr, [x4]
 	/* --------------------------------------------------------------------
 	 * get per cpuup boot addr
 	 * --------------------------------------------------------------------
 	 */
 	adr	x5, cpuson_entry_point
-	ldr	x2, [x5, x0, lsl #3]
+	ldr	x2, [x5, x21, lsl #3]
 	br	x2
 endfunc platform_cpu_warmboot
...
...
@@ -252,3 +197,4 @@ cpuson_flags:
 	.rept	PLATFORM_CORE_COUNT
 	.word	0
 	.endr
+rockchip_clst_warmboot_data
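The warm-boot path above looks up a per-core flag and entry point by the linear core index it just computed. A hedged C model of that lookup, with the flag values taken from plat_private.h in this commit and the core count assumed for rk3399:

#include <stdint.h>

#define PMU_CPU_HOTPLUG		0xf00
#define PMU_CPU_AUTO_PWRDN	0xf0
#define PLATFORM_CORE_COUNT	6	/* rk3399; assumption for this sketch */

static uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
static uintptr_t cpuson_entry_point[PLATFORM_CORE_COUNT];

typedef void (*warm_entry_t)(void);

/* C model of what the assembly does after computing the core position. */
static void warmboot_model(uint32_t core_pos)
{
	uint32_t flag = cpuson_flags[core_pos];

	if (flag != PMU_CPU_AUTO_PWRDN && flag != PMU_CPU_HOTPLUG)
		return;			/* the real code parks the core in a wfe loop */

	cpuson_flags[core_pos] = 0;	/* "str wzr, [x4]" */
	((warm_entry_t)cpuson_entry_point[core_pos])();
}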
plat/rockchip/common/drivers/pmu/pmu_com.h
...
...
@@ -31,7 +31,7 @@
* Use this macro to instantiate lock before it is used in below
* rockchip_pd_lock_xxx() macros
*/
-DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+DECLARE_BAKERY_LOCK(rockchip_pd_lock);
/*
* These are wrapper macros to the power domain Bakery Lock API.
...
...
plat/rockchip/common/include/plat_private.h
...
...
@@ -35,6 +35,7 @@
#include <mmio.h>
#include <stdint.h>
#include <xlat_tables.h>
#include <psci.h>
/******************************************************************************
* For rockchip socs pm ops
...
...
@@ -45,6 +46,12 @@ struct rockchip_pm_ops_cb {
	int (*cores_pwr_dm_on_finish)(void);
	int (*cores_pwr_dm_suspend)(void);
	int (*cores_pwr_dm_resume)(void);
	/* hlvl is used for clusters or system level */
	int (*hlvl_pwr_dm_suspend)(uint32_t lvl, plat_local_state_t lvl_state);
	int (*hlvl_pwr_dm_resume)(uint32_t lvl, plat_local_state_t lvl_state);
	int (*hlvl_pwr_dm_off)(uint32_t lvl, plat_local_state_t lvl_state);
	int (*hlvl_pwr_dm_on_finish)(uint32_t lvl, plat_local_state_t lvl_state);
	int (*sys_pwr_dm_suspend)(void);
	int (*sys_pwr_dm_resume)(void);
	void (*sys_gbl_soft_reset)(void) __dead2;
...
...
@@ -109,6 +116,7 @@ void plat_rockchip_pmusram_prepare(void);
 void plat_rockchip_pmu_init(void);
 void plat_rockchip_soc_init(void);
 void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops);
+uintptr_t plat_get_sec_entrypoint(void);
 void platform_cpu_warmboot(void);
...
...
@@ -126,10 +134,12 @@ extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
 extern const mmap_region_t plat_rk_mmap[];

 #endif /* __ASSEMBLY__ */
/* only Cortex-A53 */
#define RK_PLAT_CFG0 0
/* include Cortex-A72 */
#define RK_PLAT_CFG1 1
/******************************************************************************
* cpu up status
 * The macro values must fit in 12 bits, as they are used as cmp immediates!
******************************************************************************/
#define PMU_CPU_HOTPLUG 0xf00
#define PMU_CPU_AUTO_PWRDN 0xf0
#define PMU_CLST_RET 0xa5
#endif
/* __PLAT_PRIVATE_H__ */
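The struct above is the hook table an SoC port registers with the common Rockchip PSCI glue. A minimal sketch of how a platform could fill and register it; everything except plat_setup_rockchip_pm_ops() and the field names is hypothetical:

/* Hypothetical SoC-side registration of the PM hooks. */
static int my_cores_suspend(void) { return 0; }
static int my_cores_resume(void) { return 0; }

static struct rockchip_pm_ops_cb my_pm_ops = {
	.cores_pwr_dm_suspend = my_cores_suspend,
	.cores_pwr_dm_resume = my_cores_resume,
	/* unset hooks stay NULL; the common code checks for NULL before calling */
};

void my_soc_pmu_init(void)
{
	plat_setup_rockchip_pm_ops(&my_pm_ops);
}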
plat/rockchip/common/plat_pm.c
...
...
@@ -50,21 +50,6 @@ static uintptr_t rockchip_sec_entrypoint;
 static struct rockchip_pm_ops_cb *rockchip_ops;

-static void plat_rockchip_sys_pwr_domain_resume(void)
-{
-	if (rockchip_ops && rockchip_ops->sys_pwr_dm_resume)
-		rockchip_ops->sys_pwr_dm_resume();
-}
-
-static void plat_rockchip_cores_pwr_domain_resume(void)
-{
-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_resume)
-		rockchip_ops->cores_pwr_dm_resume();
-
-	/* Program the gic per-cpu distributor or re-distributor interface */
-	plat_rockchip_gic_cpuif_enable();
-}
/*******************************************************************************
* Rockchip standard platform handler called to check the validity of the power
* state parameter.
...
...
@@ -96,6 +81,10 @@ int rockchip_validate_power_state(unsigned int power_state,
		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;

		for (i = (pwr_lvl + 1); i <= PLAT_MAX_PWR_LVL; i++)
			req_state->pwr_domain_state[i] = PLAT_MAX_RET_STATE;
	}
/* We expect the 'state id' to be zero */
...
...
@@ -154,14 +143,28 @@ int rockchip_pwr_domain_on(u_register_t mpidr)
 ******************************************************************************/
void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint32_t lvl;
	plat_local_state_t lvl_state;

	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);

	plat_rockchip_gic_cpuif_disable();

	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		plat_cci_disable();

-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_off)
-		rockchip_ops->cores_pwr_dm_off();
+	if (!rockchip_ops || !rockchip_ops->cores_pwr_dm_off)
+		return;
+
+	rockchip_ops->cores_pwr_dm_off();
+
+	if (!rockchip_ops->hlvl_pwr_dm_off)
+		return;
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		rockchip_ops->hlvl_pwr_dm_off(lvl, lvl_state);
+	}
}
/*******************************************************************************
...
...
@@ -170,18 +173,20 @@ void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
 ******************************************************************************/
void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+
-	if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
-		return;
-
-	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);
+	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
+		return;

-	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
-		if (rockchip_ops && rockchip_ops->sys_pwr_dm_suspend)
-			rockchip_ops->sys_pwr_dm_suspend();
-	} else {
-		if (rockchip_ops && rockchip_ops->cores_pwr_dm_suspend)
-			rockchip_ops->cores_pwr_dm_suspend();
-	}
+	if (rockchip_ops) {
+		if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE &&
+		    rockchip_ops->sys_pwr_dm_suspend) {
+			rockchip_ops->sys_pwr_dm_suspend();
+		} else if (rockchip_ops->cores_pwr_dm_suspend) {
+			rockchip_ops->cores_pwr_dm_suspend();
+		}
+	}

	/* Prevent interrupts from spuriously waking up this cpu */
	plat_rockchip_gic_cpuif_disable();
...
...
@@ -189,6 +194,14 @@ void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		plat_cci_disable();

+	if (!rockchip_ops || !rockchip_ops->hlvl_pwr_dm_suspend)
+		return;
+
+	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+		lvl_state = target_state->pwr_domain_state[lvl];
+		rockchip_ops->hlvl_pwr_dm_suspend(lvl, lvl_state);
+	}
}
/*******************************************************************************
...
...
@@ -198,10 +211,24 @@ void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
 ******************************************************************************/
void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+
	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);

-	if (rockchip_ops && rockchip_ops->cores_pwr_dm_on_finish)
-		rockchip_ops->cores_pwr_dm_on_finish();
+	if (!rockchip_ops)
+		goto comm_finish;
+
+	if (rockchip_ops->hlvl_pwr_dm_on_finish) {
+		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+			lvl_state = target_state->pwr_domain_state[lvl];
+			rockchip_ops->hlvl_pwr_dm_on_finish(lvl, lvl_state);
+		}
+	}
+
+	if (rockchip_ops->cores_pwr_dm_on_finish)
+		rockchip_ops->cores_pwr_dm_on_finish();
+
+comm_finish:
	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
...
...
@@ -225,15 +252,37 @@ void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
 ******************************************************************************/
void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
+	uint32_t lvl;
+	plat_local_state_t lvl_state;
+
	/* Nothing to be done on waking up from retention from CPU level */
-	if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
+	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

-	/* Perform system domain restore if woken up from system suspend */
-	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
-		plat_rockchip_sys_pwr_domain_resume();
-	else
-		plat_rockchip_cores_pwr_domain_resume();
+	if (!rockchip_ops)
+		goto comm_finish;
+
+	if (rockchip_ops->hlvl_pwr_dm_resume) {
+		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+			lvl_state = target_state->pwr_domain_state[lvl];
+			rockchip_ops->hlvl_pwr_dm_resume(lvl, lvl_state);
+		}
+	}
+
+	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE &&
+	    rockchip_ops->sys_pwr_dm_resume) {
+		rockchip_ops->sys_pwr_dm_resume();
+	} else if (rockchip_ops->cores_pwr_dm_resume) {
+		rockchip_ops->cores_pwr_dm_resume();
+	}
+
+comm_finish:
+	/*
+	 * Program the gic per-cpu distributor
+	 * or re-distributor interface
+	 */
+	plat_rockchip_gic_cpuif_enable();

	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
...
...
@@ -288,6 +337,12 @@ int plat_setup_psci_ops(uintptr_t sec_entrypoint,
	return 0;
}

+uintptr_t plat_get_sec_entrypoint(void)
+{
+	assert(rockchip_sec_entrypoint);
+	return rockchip_sec_entrypoint;
+}
+
 void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops)
 {
	rockchip_ops = ops;
...
...
plat/rockchip/common/plat_topology.c
...
...
@@ -45,11 +45,13 @@ int plat_core_pos_by_mpidr(u_register_t mpidr)
 {
	unsigned int cluster_id, cpu_id;

-	cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
-	cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+	cpu_id = mpidr & MPIDR_AFFLVL_MASK;
+	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

-	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
-		return -1;
+	cpu_id += (cluster_id >> PLAT_RK_CLST_TO_CPUID_SHIFT);

	if (cpu_id >= PLATFORM_CORE_COUNT)
		return -1;

-	return ((cluster_id * PLATFORM_CLUSTER0_CORE_COUNT) + cpu_id);
+	return cpu_id;
 }
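For reference, a small self-contained model of the new mapping, assuming the rk3399 values from this commit (PLAT_RK_CLST_TO_CPUID_SHIFT = 6, six cores total); the helper name and the example MPIDRs are purely illustrative:

#include <assert.h>
#include <stdint.h>

#define MPIDR_AFFLVL_MASK		0xffU
#define MPIDR_CLUSTER_MASK		0xff00U
#define PLAT_RK_CLST_TO_CPUID_SHIFT	6
#define PLATFORM_CORE_COUNT		6

static int core_pos_by_mpidr(uint64_t mpidr)
{
	uint64_t cpu_id = mpidr & MPIDR_AFFLVL_MASK;
	uint64_t cluster_id = mpidr & MPIDR_CLUSTER_MASK;

	cpu_id += cluster_id >> PLAT_RK_CLST_TO_CPUID_SHIFT;
	return (cpu_id >= PLATFORM_CORE_COUNT) ? -1 : (int)cpu_id;
}

int main(void)
{
	assert(core_pos_by_mpidr(0x003) == 3);	/* cluster 0, cpu 3 */
	assert(core_pos_by_mpidr(0x100) == 4);	/* cluster 1, cpu 0 */
	assert(core_pos_by_mpidr(0x102) == -1);	/* out of range for a 6-core part */
	return 0;
}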
plat/rockchip/rk3368/drivers/pmu/plat_pmu_macros.S
(new file, mode 100644)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * [Standard BSD-3-Clause license text as used throughout ARM Trusted Firmware.]
 */

#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>

.macro func_rockchip_clst_warmboot
	/* Nothing to do for rk3368 */
.endm

.macro rockchip_clst_warmboot_data
	/* Nothing to do for rk3368 */
.endm
plat/rockchip/rk3368/drivers/pmu/pmu.c
...
...
@@ -40,6 +40,8 @@
 #include <ddr_rk3368.h>
 #include <pmu_com.h>

+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
 static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;
...
...
plat/rockchip/rk3368/include/platform_def.h
...
...
@@ -74,6 +74,8 @@
PLATFORM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLAT_RK_CLST_TO_CPUID_SHIFT 8
#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
/*
...
...
@@ -144,6 +146,4 @@
#define PLAT_RK_PRIMARY_CPU 0x0
#define RK_PLAT_AARCH_CFG RK_PLAT_CFG0
#endif
/* __PLATFORM_DEF_H__ */
plat/rockchip/rk3368/rk3368_def.h
...
...
@@ -105,12 +105,6 @@
#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX 3
#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX 4
/******************************************************************************
* cpu up status
******************************************************************************/
#define PMU_CPU_HOTPLUG 0xdeadbeaf
#define PMU_CPU_AUTO_PWRDN 0xabcdef12
/******************************************************************************
* sgi, ppi
******************************************************************************/
...
...
plat/rockchip/rk3399/drivers/pmu/plat_pmu_macros.S
(new file, mode 100644)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * [Standard BSD-3-Clause license text as used throughout ARM Trusted Firmware.]
 */

#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>

	.globl	clst_warmboot_data

#define PLL_MODE_SHIFT		(0x8)
#define PLL_NORMAL_MODE		((0x3 << (PLL_MODE_SHIFT + 16)) | \
				 (0x1 << PLL_MODE_SHIFT))
#define MPIDR_CLST_L_BITS	0x0

	/*
	 * For different socs, if we want to speed up warmboot,
	 * we need to config some regs here.
	 * If scu was suspend, we must resume related clk
	 * from slow (24M) mode to normal mode first.
	 * X0: MPIDR_EL1 & MPIDR_CLUSTER_MASK
	 */
.macro func_rockchip_clst_warmboot
	adr	x4, clst_warmboot_data
	lsr	x5, x0, #6
	ldr	w3, [x4, x5]
	str	wzr, [x4, x5]
	cmp	w3, #PMU_CLST_RET
	b.ne	clst_warmboot_end
	ldr	w6, =(PLL_NORMAL_MODE)
	/*
	 * core_l offset is CRU_BASE + 0xc,
	 * core_b offset is CRU_BASE + 0x2c
	 */
	ldr	x7, =(CRU_BASE + 0xc)
	lsr	x2, x0, #3
	str	w6, [x7, x2]
clst_warmboot_end:
.endm

.macro rockchip_clst_warmboot_data
clst_warmboot_data:
	.rept	PLATFORM_CLUSTER_COUNT
	.word	0
	.endr
.endm
plat/rockchip/rk3399/drivers/pmu/pmu.c
...
...
@@ -46,6 +46,8 @@
 #include <pmu.h>
 #include <pmu_com.h>

+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
 static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;
...
...
@@ -67,6 +69,320 @@ __attribute__ ((section("tzfw_coherent_mem")))
 #endif
	;	/* coherent */

static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
	uint32_t bus_id = BIT(bus);
	uint32_t bus_req;
	uint32_t wait_cnt = 0;
	uint32_t bus_state, bus_ack;

	if (state)
		bus_req = BIT(bus);
	else
		bus_req = 0;

	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);

	do {
		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
		wait_cnt++;
	} while ((bus_state != bus_req || bus_ack != bus_req) &&
		 (wait_cnt < MAX_WAIT_COUNT));

	if (bus_state != bus_req || bus_ack != bus_req) {
		INFO("%s:st=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), bus_state);
		INFO("%s:st=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), bus_ack);
	}
}

struct pmu_slpdata_s pmu_slpdata;

static void qos_save(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	/* ... the same SAVE_QOS pattern repeats for PD_ISP1, PD_VO, PD_HDCP,
	 * PD_GMAC, PD_CCI, PD_SD, PD_EMMC, PD_SDIOAUDIO, PD_GIC, PD_RGA,
	 * PD_IEP, PD_USB3, PD_PERIHP, PD_PERILP, PD_VDU and PD_VCODEC,
	 * saving each domain's QoS register set only when the domain is on ... */
}

static void qos_restore(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
	/* ... mirrors qos_save() with RESTORE_QOS for the same set of
	 * power domains ... */
}

static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VIO:
		pmu_bus_idle_req(BUS_ID_VIO, state);
		break;
	case PD_VO:
		pmu_bus_idle_req(BUS_ID_VOPB, state);
		pmu_bus_idle_req(BUS_ID_VOPL, state);
		break;
	case PD_CCI:
		pmu_bus_idle_req(BUS_ID_CCIM0, state);
		pmu_bus_idle_req(BUS_ID_CCIM1, state);
		break;
	case PD_TCPD0:
	case PD_TCPD1:
		break;
	/* ... single pmu_bus_idle_req(BUS_ID_xxx, state) cases for PD_ISP0,
	 * PD_ISP1, PD_HDCP, PD_GMAC, PD_SD, PD_EMMC, PD_EDP, PD_SDIOAUDIO,
	 * PD_GIC, PD_RGA, PD_VCODEC, PD_VDU, PD_IEP, PD_USB3, PD_PERIHP ... */
	default:
		break;
	}

	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}

static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_EDP, pmu_pd_off);
	pmu_set_power_domain(PD_IEP, pmu_pd_off);
	pmu_set_power_domain(PD_RGA, pmu_pd_off);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
	pmu_set_power_domain(PD_VDU, pmu_pd_off);
	clk_gate_con_restore();
}

static void pmu_power_domains_resume(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
		pmu_set_power_domain(PD_VDU, pmu_pd_on);
	/* ... likewise for PD_VCODEC, PD_RGA, PD_IEP, PD_EDP, PD_GMAC,
	 * PD_SDIOAUDIO, PD_HDCP, PD_ISP1, PD_ISP0, PD_VO, PD_TCPD1,
	 * PD_TCPD0 and PD_GPU: each domain that was on before suspend is
	 * powered back on ... */
	qos_restore();
	clk_gate_con_restore();
}

 void rk3399_flash_l2_b(void)
 {
	uint32_t wait_cnt = 0;
...
...
@@ -77,7 +393,7 @@ void rk3399_flash_l2_b(void)
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
-		if (!(wait_cnt % MAX_WAIT_CONUT))
+		if (wait_cnt >= MAX_WAIT_COUNT)
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
...
...
@@ -103,7 +419,7 @@ static void pmu_scu_b_pwrdn(void)
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
-		if (!(wait_cnt % MAX_WAIT_CONUT))
+		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
...
...
@@ -220,6 +536,78 @@ static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
	return 0;
 }

static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_RET_STATE || lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				      PLATFORM_CLUSTER0_CORE_COUNT;
		}

		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * it is mean that others cpu is up again,
			 * we must resume the cfg at once.
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}

static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, pll_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_RET_STATE || lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
			pll_id = ALPLL_ID;
		else
			pll_id = ABPLL_ID;

		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
			 PLL_MODE_SHIFT;

		if (pll_st != NORMAL_MODE) {
			WARN("%s: clst (%d) is in error mode (%d)\n",
			     __func__, pll_id, pll_st);
			return -1;
		}
	}

	return 0;
}

 static void nonboot_cpus_off(void)
 {
	uint32_t boot_cpu, cpu;
...
...
@@ -258,6 +646,19 @@ static int cores_pwr_domain_off(void)
	return 0;
 }

static int hlvl_pwr_domain_off(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

 static int cores_pwr_domain_suspend(void)
 {
	uint32_t cpu_id = plat_my_core_pos();
...
...
@@ -265,7 +666,7 @@ static int cores_pwr_domain_suspend(void)
	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
-	cpuson_entry_point[cpu_id] = (uintptr_t)psci_entrypoint;
+	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
...
...
@@ -273,12 +674,38 @@ static int cores_pwr_domain_suspend(void)
	return 0;
 }

static int hlvl_pwr_domain_suspend(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

 static int cores_pwr_domain_on_finish(void)
 {
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
 }

static int hlvl_pwr_domain_on_finish(uint32_t lvl,
				     plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}
...
...
@@ -293,10 +720,23 @@ static int cores_pwr_domain_resume(void)
	return 0;
 }

static int hlvl_pwr_domain_resume(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
	default:
		break;
	}

	return 0;
}

 static void sys_slp_config(void)
 {
	uint32_t slp_mode_cfg = 0;

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
|
...
...
@@ -315,25 +755,77 @@ static void sys_slp_config(void)
		      BIT(PMU_CPU0_PD_EN) |
		      BIT(PMU_L2_FLUSH_EN) |
		      BIT(PMU_L2_IDLE_EN) |
-		      BIT(PMU_SCU_PD_EN);
+		      BIT(PMU_SCU_PD_EN) |
+		      BIT(PMU_CCI_PD_EN) | BIT(PMU_CLK_CORE_SRC_GATE_EN) |
+		      BIT(PMU_PERILP_PD_EN) | BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
+		      BIT(PMU_ALIVE_USE_LF) | BIT(PMU_SREF0_ENTER_EN) |
+		      BIT(PMU_SREF1_ENTER_EN) | BIT(PMU_DDRC0_GATING_EN) |
+		      BIT(PMU_DDRC1_GATING_EN) | BIT(PMU_DDRIO0_RET_EN) |
+		      BIT(PMU_DDRIO1_RET_EN) | BIT(PMU_DDRIO_RET_HW_DE_REQ) |
+		      BIT(PMU_PLL_PD_EN) | BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
+		      BIT(PMU_OSC_DIS) | BIT(PMU_PMU_USE_LF);

-	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_CLUSTER_L_WKUP_EN));
-	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_CLUSTER_B_WKUP_EN));
-	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
-	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);
+	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_L_WKUP_EN);
+	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_B_WKUP_EN);
+	mmio_clrbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_GPIO_WKUP_EN);

-	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(5));
-	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(2));
-	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_MS(2));
-	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_MS(2));
-	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_MS(2));
+	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_32K_CNT_MS(5));
+	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(5));
+
+	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(PMU_24M_EN_CFG));
+
+	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
+	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
+	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
+
+	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);
 }

+static void set_hw_idle(uint32_t hw_idle)
+{
+	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
+}
+
+static void clr_hw_idle(uint32_t hw_idle)
+{
+	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
+}
+
+static int sys_pwr_domain_suspend(void)
+{
+	uint32_t wait_cnt = 0;
+	uint32_t status = 0;
+
+	pmu_power_domains_suspend();
+	set_hw_idle(BIT(PMU_CLR_CENTER1) | BIT(PMU_CLR_ALIVE) |
+		    BIT(PMU_CLR_MSCH0) | BIT(PMU_CLR_MSCH1) |
+		    BIT(PMU_CLR_CCIM0) | BIT(PMU_CLR_CCIM1) |
+		    BIT(PMU_CLR_CENTER) | BIT(PMU_CLR_PERILP) |
+		    BIT(PMU_CLR_PMU) | BIT(PMU_CLR_PERILPM0) |
+		    BIT(PMU_CLR_GIC));
+
+	sys_slp_config();
+	plls_suspend();
+	pmu_sgrf_rst_hld();
+
+	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
...
...
@@ -347,42 +839,83 @@ static int sys_pwr_domain_suspend(void)
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE + PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	/* TODO: Wait SoC to cut off the logic_center, switch the gpio mode */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, GPIO0A6_IOMUX_GPIO);

	return 0;
}

static int sys_pwr_domain_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	/* TODO: switch the pwm mode */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, GPIO0A6_IOMUX_PWM);

	pmu_sgrf_rst_hld();
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	plls_resume();
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	while ((mmio_read_32(PMU_BASE + PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_scu_b_pwrup();
	plat_rockchip_gic_cpuif_enable();

	pmu_power_domains_resume();
	clr_hw_idle(BIT(PMU_CLR_CENTER1) | BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) | BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) | BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) | BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PMU) | BIT(PMU_CLR_GIC));
	return 0;
}
...
...
@@ -434,6 +967,10 @@ static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
	.cores_pwr_dm_resume = cores_pwr_domain_resume,
	.hlvl_pwr_dm_suspend = hlvl_pwr_domain_suspend,
	.hlvl_pwr_dm_resume = hlvl_pwr_domain_resume,
	.hlvl_pwr_dm_off = hlvl_pwr_domain_off,
	.hlvl_pwr_dm_on_finish = hlvl_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_soft_reset,
...
...
@@ -453,15 +990,19 @@ void plat_rockchip_pmu_init(void)
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

+	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
+		clst_warmboot_data[cpu] = 0;
+
	psram_sleep_cfg->ddr_func = 0x00;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x00;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

-	/* cpu boot from pmusram */
+	/* config cpu's warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
+	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	nonboot_cpus_off();
...
...
plat/rockchip/rk3399/drivers/pmu/pmu.h
...
...
@@ -808,14 +808,137 @@ enum pmu_core_pwr_st {
#define PMU_NOC_AUTO_ENA 0xd8
#define PMU_PWRDN_CON1 0xdc
#define PMUGRF_GPIO0A_IOMUX 0x00
#define PMUGRF_GPIO1A_IOMUX 0x10
#define AP_PWROFF 0x0a
#define GPIO0A6_IOMUX_GPIO BITS_WITH_WMASK(0, 3, 12)
#define GPIO0A6_IOMUX_PWM BITS_WITH_WMASK(1, 3, 12)
#define GPIO1A6_IOMUX BITS_WITH_WMASK(0, 3, 12)
#define TSADC_INT_PIN 38
#define CORES_PM_DISABLE 0x0
#define CPU_AXI_QOS_ID_COREID 0x00
#define CPU_AXI_QOS_REVISIONID 0x04
#define CPU_AXI_QOS_PRIORITY 0x08
#define CPU_AXI_QOS_MODE 0x0c
#define CPU_AXI_QOS_BANDWIDTH 0x10
#define CPU_AXI_QOS_SATURATION 0x14
#define CPU_AXI_QOS_EXTCONTROL 0x18
#define CPU_AXI_QOS_NUM_REGS 0x07
#define CPU_AXI_CCI_M0_QOS_BASE 0xffa50000
#define CPU_AXI_CCI_M1_QOS_BASE 0xffad8000
#define CPU_AXI_DMAC0_QOS_BASE 0xffa64200
#define CPU_AXI_DMAC1_QOS_BASE 0xffa64280
#define CPU_AXI_DCF_QOS_BASE 0xffa64180
#define CPU_AXI_CRYPTO0_QOS_BASE 0xffa64100
#define CPU_AXI_CRYPTO1_QOS_BASE 0xffa64080
#define CPU_AXI_PMU_CM0_QOS_BASE 0xffa68000
#define CPU_AXI_PERI_CM1_QOS_BASE 0xffa64300
#define CPU_AXI_GIC_QOS_BASE 0xffa78000
#define CPU_AXI_SDIO_QOS_BASE 0xffa76000
#define CPU_AXI_SDMMC_QOS_BASE 0xffa74000
#define CPU_AXI_EMMC_QOS_BASE 0xffa58000
#define CPU_AXI_GMAC_QOS_BASE 0xffa5c000
#define CPU_AXI_USB_OTG0_QOS_BASE 0xffa70000
#define CPU_AXI_USB_OTG1_QOS_BASE 0xffa70080
#define CPU_AXI_USB_HOST0_QOS_BASE 0xffa60100
#define CPU_AXI_USB_HOST1_QOS_BASE 0xffa60180
#define CPU_AXI_GPU_QOS_BASE 0xffae0000
#define CPU_AXI_VIDEO_M0_QOS_BASE 0xffab8000
#define CPU_AXI_VIDEO_M1_R_QOS_BASE 0xffac0000
#define CPU_AXI_VIDEO_M1_W_QOS_BASE 0xffac0080
#define CPU_AXI_RGA_R_QOS_BASE 0xffab0000
#define CPU_AXI_RGA_W_QOS_BASE 0xffab0080
#define CPU_AXI_IEP_QOS_BASE 0xffa98000
#define CPU_AXI_VOP_BIG_R_QOS_BASE 0xffac8000
#define CPU_AXI_VOP_BIG_W_QOS_BASE 0xffac8080
#define CPU_AXI_VOP_LITTLE_QOS_BASE 0xffad0000
#define CPU_AXI_ISP0_M0_QOS_BASE 0xffaa0000
#define CPU_AXI_ISP0_M1_QOS_BASE 0xffaa0080
#define CPU_AXI_ISP1_M0_QOS_BASE 0xffaa8000
#define CPU_AXI_ISP1_M1_QOS_BASE 0xffaa8080
#define CPU_AXI_HDCP_QOS_BASE 0xffa90000
#define CPU_AXI_PERIHP_NSP_QOS_BASE 0xffad8080
#define CPU_AXI_PERILP_NSP_QOS_BASE 0xffad8180
#define CPU_AXI_PERILPSLV_NSP_QOS_BASE 0xffad8100
#define PD_CTR_LOOP 500
#define CHK_CPU_LOOP 500
#define MAX_WAIT_CONUT 1000
#define MAX_WAIT_COUNT 1000
#define GRF_SOC_CON4 0x0e210
#define PMUGRF_SOC_CON0 0x0180
#define CCI_FORCE_WAKEUP WMSK_BIT(8)
#define EXTERNAL_32K WMSK_BIT(0)
#define PLL_PD_HW 0xff
#define IOMUX_CLK_32K 0x00030002
#define NOC_AUTO_ENABLE 0x3fffffff
#define SAVE_QOS(array, NAME) \
RK3399_CPU_AXI_SAVE_QOS(array, CPU_AXI_##NAME##_QOS_BASE)
#define RESTORE_QOS(array, NAME) \
RK3399_CPU_AXI_RESTORE_QOS(array, CPU_AXI_##NAME##_QOS_BASE)
#define RK3399_CPU_AXI_SAVE_QOS(array, base) do { \
array[0] = mmio_read_32(base + CPU_AXI_QOS_ID_COREID); \
array[1] = mmio_read_32(base + CPU_AXI_QOS_REVISIONID); \
array[2] = mmio_read_32(base + CPU_AXI_QOS_PRIORITY); \
array[3] = mmio_read_32(base + CPU_AXI_QOS_MODE); \
array[4] = mmio_read_32(base + CPU_AXI_QOS_BANDWIDTH); \
array[5] = mmio_read_32(base + CPU_AXI_QOS_SATURATION); \
array[6] = mmio_read_32(base + CPU_AXI_QOS_EXTCONTROL); \
} while (0)
#define RK3399_CPU_AXI_RESTORE_QOS(array, base) do { \
mmio_write_32(base + CPU_AXI_QOS_ID_COREID, array[0]); \
mmio_write_32(base + CPU_AXI_QOS_REVISIONID, array[1]); \
mmio_write_32(base + CPU_AXI_QOS_PRIORITY, array[2]); \
mmio_write_32(base + CPU_AXI_QOS_MODE, array[3]); \
mmio_write_32(base + CPU_AXI_QOS_BANDWIDTH, array[4]); \
mmio_write_32(base + CPU_AXI_QOS_SATURATION, array[5]); \
mmio_write_32(base + CPU_AXI_QOS_EXTCONTROL, array[6]); \
} while (0)
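The SAVE_QOS/RESTORE_QOS pair above snapshots the seven QoS registers of one NoC master into a caller-provided array and writes them back on resume. A hypothetical usage sketch (the array and function names below are illustrative, not part of the driver):

/* Hypothetical usage of the QoS save/restore helpers defined above. */
static uint32_t gpu_qos_save_area[CPU_AXI_QOS_NUM_REGS];

static void example_suspend_path(void)
{
	SAVE_QOS(gpu_qos_save_area, GPU);	/* expands to reads from CPU_AXI_GPU_QOS_BASE */
}

static void example_resume_path(void)
{
	RESTORE_QOS(gpu_qos_save_area, GPU);	/* writes the saved values back */
}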
struct pmu_slpdata_s {
	uint32_t cci_m0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t cci_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dmac0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dmac1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t crypto0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t crypto1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t pmu_cm0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t peri_cm1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gic_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_otg0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_otg1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_host0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t usb_host1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t video_m0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t video_m1_r_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t video_m1_w_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t rga_r_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t rga_w_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_big_r[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_big_w[CPU_AXI_QOS_NUM_REGS];
	uint32_t vop_little[CPU_AXI_QOS_NUM_REGS];
	uint32_t iep_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp1_m0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp1_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp0_m0_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t isp0_m1_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t hdcp_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t perihp_nsp_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t perilp_nsp_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t perilpslv_nsp_qos[CPU_AXI_QOS_NUM_REGS];
	uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS];
};

extern uint32_t clst_warmboot_data[PLATFORM_CLUSTER_COUNT];
#endif
/* __PMU_H__ */
plat/rockchip/rk3399/drivers/soc/soc.c
...
...
@@ -69,6 +69,15 @@ const mmap_region_t plat_rk_mmap[] = {
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(GRF_BASE, GRF_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SERVICE_NOC_0_BASE, NOC_0_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SERVICE_NOC_1_BASE, NOC_1_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SERVICE_NOC_2_BASE, NOC_2_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+	MAP_REGION_FLAT(SERVICE_NOC_3_BASE, NOC_3_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
	{ 0 }
 };
...
...
@@ -272,8 +281,7 @@ void plls_suspend(void)
	for (i = 0; i < CRU_CLKSEL_COUNT; i++)
		slp_data.cru_clksel_con[i] =
-			mmio_read_32(CRU_BASE + CRU_CLKSEL_OFFSET + i * REG_SIZE);
+			mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(i));

	for (i = 0; i < PMUCRU_CLKSEL_CONUT; i++)
		slp_data.pmucru_clksel_con[i] =
...
...
@@ -289,6 +297,43 @@ void plls_suspend(void)
	_pll_suspend(ALPLL_ID);
 }

void clk_gate_con_save(void)
{
	uint32_t i = 0;

	for (i = 0; i < PMUCRU_GATE_COUNT; i++)
		slp_data.pmucru_gate_con[i] =
			mmio_read_32(PMUCRU_BASE + PMUCRU_GATE_CON(i));

	for (i = 0; i < CRU_GATE_COUNT; i++)
		slp_data.cru_gate_con[i] =
			mmio_read_32(CRU_BASE + CRU_GATE_CON(i));
}

void clk_gate_con_disable(void)
{
	uint32_t i;

	for (i = 0; i < PMUCRU_GATE_COUNT; i++)
		mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i), REG_SOC_WMSK);

	for (i = 0; i < CRU_GATE_COUNT; i++)
		mmio_write_32(CRU_BASE + CRU_GATE_CON(i), REG_SOC_WMSK);
}

void clk_gate_con_restore(void)
{
	uint32_t i;

	for (i = 0; i < PMUCRU_GATE_COUNT; i++)
		mmio_write_32(PMUCRU_BASE + PMUCRU_GATE_CON(i),
			      REG_SOC_WMSK | slp_data.pmucru_gate_con[i]);

	for (i = 0; i < CRU_GATE_COUNT; i++)
		mmio_write_32(CRU_BASE + CRU_GATE_CON(i),
			      REG_SOC_WMSK | slp_data.cru_gate_con[i]);
}

 static void set_plls_nobypass(uint32_t pll_id)
 {
	if (pll_id == PPLL_ID)
...
...
@@ -304,7 +349,7 @@ static void plls_resume_prepare(void)
	int i;

	for (i = 0; i < CRU_CLKSEL_COUNT; i++)
-		mmio_write_32((CRU_BASE + CRU_CLKSEL_OFFSET + i * REG_SIZE),
+		mmio_write_32((CRU_BASE + CRU_CLKSEL_CON(i)),
			      REG_SOC_WMSK | slp_data.cru_clksel_con[i]);

	for (i = 0; i < PMUCRU_CLKSEL_CONUT; i++)
		mmio_write_32((PMUCRU_BASE +
...
...
plat/rockchip/rk3399/drivers/soc/soc.h
...
...
@@ -34,12 +34,8 @@
#define GLB_SRST_FST_CFG_VAL 0xfdb9
#define GLB_SRST_SND_CFG_VAL 0xeca8
#define PMUCRU_PPLL_CON_OFFSET 0x000
#define PMUCRU_PPLL_CON_BASE_ADDR (PMUCRU_BASE + PMUCRU_PPLL_CON_OFFSET)
#define PMUCRU_PPLL_CON_CONUT 0x06
#define PMUCRU_PPLL_CON(num) (PMUCRU_PPLL_CON_BASE_ADDR + num * 4)
#define CRU_PLL_CON(pll_id, num) (CRU_BASE + pll_id * 0x20 + num * 4)
#define PMUCRU_PPLL_CON(n) ((n) * 4)
#define CRU_PLL_CON(pll_id, n) ((pll_id) * 0x20 + (n) * 4)
#define PLL_MODE_MSK 0x03
#define PLL_MODE_SHIFT 0x08
#define PLL_BYPASS_MSK 0x01
...
...
@@ -54,26 +50,28 @@
#define PLL_SLOW_MODE BITS_WITH_WMASK(SLOW_MODE,\
PLL_MODE_MSK, PLL_MODE_SHIFT)
#define PLL_BYPASS_MODE BITS_WITH_WMASK(PLL_BYPASS,\
PLL_BYPASS_MSK,\
PLL_BYPASS_SHIFT)
#define PLL_NO_BYPASS_MODE BITS_WITH_WMASK(NO_PLL_BYPASS,\
PLL_BYPASS_MSK,\
PLL_BYPASS_SHIFT)
#define PLL_NOMAL_MODE BITS_WITH_WMASK(NORMAL_MODE,\
PLL_MODE_MSK, PLL_MODE_SHIFT)
#define PLL_BYPASS_MODE BIT_WITH_WMSK(PLL_BYPASS_SHIFT)
#define PLL_NO_BYPASS_MODE WMSK_BIT(PLL_BYPASS_SHIFT)
#define PLL_CON_COUNT 0x06
#define CRU_CLKSEL_COUNT 0x108
-#define CRU_CLKSEL_OFFSET		0x300
+#define CRU_CLKSEL_CON(n)		(0x80 + (n) * 4)
#define PMUCRU_CLKSEL_CONUT 0x06
#define PMUCRU_CLKSEL_OFFSET 0x080
#define REG_SIZE 0x04
#define REG_SOC_WMSK 0xffff0000
#define CLK_GATE_MASK 0x01
#define PMUCRU_GATE_COUNT 0x03
#define CRU_GATE_COUNT 0x23
#define PMUCRU_GATE_CON(n) (0x100 + (n) * 4)
#define CRU_GATE_CON(n) (0x300 + (n) * 4)
enum plls_id {
	ALPLL_ID = 0,
	ABPLL_ID,
...
...
@@ -86,6 +84,9 @@ enum plls_id {
	END_PLL_ID,
};

#define CLST_L_CPUS_MSK (0xf)
#define CLST_B_CPUS_MSK (0x3)

enum pll_work_mode {
	SLOW_MODE = 0x00,
	NORMAL_MODE = 0x01,
...
...
@@ -102,10 +103,13 @@ struct deepsleep_data_s {
	uint32_t plls_con[END_PLL_ID][PLL_CON_COUNT];
	uint32_t pmucru_clksel_con[PMUCRU_CLKSEL_CONUT];
	uint32_t cru_clksel_con[CRU_CLKSEL_COUNT];
	uint32_t cru_gate_con[CRU_GATE_COUNT];
	uint32_t pmucru_gate_con[PMUCRU_GATE_COUNT];
};
#define CYCL_24M_CNT_US(us) (24 * us)
#define CYCL_24M_CNT_MS(ms) (ms * CYCL_24M_CNT_US(1000))
#define CYCL_32K_CNT_MS(ms) (ms * 32)
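These macros convert a delay into timer ticks: CYCL_24M_CNT_MS(5) is 5 * 24 * 1000 = 120000 ticks of the 24 MHz oscillator, while CYCL_32K_CNT_MS(5) is 160 ticks of the 32 kHz clock. A quick standalone check (defines copied from above):

#include <assert.h>

#define CYCL_24M_CNT_US(us)	(24 * us)
#define CYCL_24M_CNT_MS(ms)	(ms * CYCL_24M_CNT_US(1000))
#define CYCL_32K_CNT_MS(ms)	(ms * 32)

int main(void)
{
	assert(CYCL_24M_CNT_MS(5) == 120000);	/* 5 ms at 24 MHz */
	assert(CYCL_32K_CNT_MS(5) == 160);	/* 5 ms at 32 kHz (32 ticks per ms) */
	return 0;
}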
/**************************************************
* secure timer
...
...
@@ -261,5 +265,7 @@ static inline void pmu_sgrf_rst_hld(void)
void __dead2 soc_global_soft_reset(void);
void plls_resume(void);
void plls_suspend(void);
void clk_gate_con_save(void);
void clk_gate_con_disable(void);
void clk_gate_con_restore(void);
#endif
/* __SOC_H__ */
plat/rockchip/rk3399/include/platform_def.h
...
...
@@ -73,7 +73,7 @@
#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
PLATFORM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLAT_RK_CLST_TO_CPUID_SHIFT 6
#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
/*
...
...
@@ -109,7 +109,7 @@
******************************************************************************/
#define ADDR_SPACE_SIZE (1ull << 32)
#define MAX_XLAT_TABLES 20
-#define MAX_MMAP_REGIONS	20
+#define MAX_MMAP_REGIONS	25
/*******************************************************************************
* Declarations and constants to access the mailboxes safely. Each mailbox is
...
...
@@ -146,6 +146,4 @@
#define PLAT_RK_PRIMARY_CPU 0x0
#define RK_PLAT_AARCH_CFG RK_PLAT_CFG1
#endif
/* __PLATFORM_DEF_H__ */
plat/rockchip/rk3399/rk3399_def.h
...
...
@@ -82,6 +82,18 @@
#define GRF_BASE 0xff770000
#define GRF_SIZE SIZE_K(64)
#define SERVICE_NOC_0_BASE 0xffa50000
#define NOC_0_SIZE SIZE_K(192)
#define SERVICE_NOC_1_BASE 0xffa84000
#define NOC_1_SIZE SIZE_K(16)
#define SERVICE_NOC_2_BASE 0xffa8c000
#define NOC_2_SIZE SIZE_K(16)
#define SERVICE_NOC_3_BASE 0xffa90000
#define NOC_3_SIZE SIZE_K(448)
/*
* include i2c pmu/audio, pwm0-3 rkpwm0-3 uart_dbg,mailbox scr
* 0xff650000 -0xff6c0000
...
...
@@ -118,12 +130,6 @@
#define PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX 0
#define PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX 1
/******************************************************************************
* cpu up status
******************************************************************************/
#define PMU_CPU_HOTPLUG 0xdeadbeaf
#define PMU_CPU_AUTO_PWRDN 0xabcdef12
/******************************************************************************
* sgi, ppi
******************************************************************************/
...
...