adam.huang / Arm Trusted Firmware / Commits / 61dbb028

Commit 61dbb028 authored Apr 06, 2016 by danh-arm

Merge pull request #581 from rockchip-linux/rockchip-atf-20160405

Support for Rockchip's family SoCs

parents af711c1e 6fba6e04

Changes 31
plat/rockchip/common/aarch64/plat_helpers.S
0 → 100644
/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <cortex_a53.h>
#include <cortex_a72.h>
#include <plat_private.h>
#include <platform_def.h>
	.globl	cpuson_entry_point
	.globl	cpuson_flags
	.globl	platform_cpu_warmboot
	.globl	plat_secondary_cold_boot_setup
	.globl	plat_report_exception
	.globl	platform_is_primary_cpu
	.globl	plat_crash_console_init
	.globl	plat_crash_console_putc
	.globl	plat_my_core_pos
	.globl	plat_reset_handler

#define RK_REVISION(rev) RK_PLAT_CFG##rev
#define RK_HANDLER(rev) plat_reset_handler_juno_r##rev
#define JUMP_TO_HANDLER_IF_RK_R(revision)	\
	jump_to_handler RK_REVISION(revision), RK_HANDLER(revision)

	/*
	 * Helper macro to jump to the given handler if the board revision
	 * matches.
	 * Expects the Juno board revision in x0.
	 */
	.macro jump_to_handler _revision, _handler
	cmp	x0, #\_revision
	b.eq	\_handler
	.endm

	/*
	 * Helper macro that reads the part number of the current CPU and jumps
	 * to the given label if it matches the CPU MIDR provided.
	 */
	.macro jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm

	/*
	 * Platform reset handler for rockchip.
	 * only A53 cores
	 */
func RK_HANDLER(0)
	ret
endfunc RK_HANDLER(0)

	/*
	 * Platform reset handler for rockchip.
	 * - Cortex-A53 processor cluster;
	 * - Cortex-A72 processor cluster.
	 *
	 * This handler does the following:
	 * - Set the L2 Data RAM latency to 2 (i.e. 3 cycles) for Cortex-A72
	 * - Set the L2 Tag RAM latency to 1 (i.e. 2 cycles) for Cortex-A72
	 */
func RK_HANDLER(1)
	/*
	 * Nothing to do on Cortex-A53.
	 */
	jump_if_cpu_midr CORTEX_A72_MIDR, A72
	ret

A72:
	/* Cortex-A72 specific settings */
	mov	x0, #((2 << L2CTLR_DATA_RAM_LATENCY_SHIFT) |	\
		      (0x1 << 5))
	msr	L2CTLR_EL1, x0
	isb
	ret
endfunc RK_HANDLER(1)

	/*
	 * void plat_reset_handler(void);
	 *
	 * Determine the SOC type and call the appropriate reset
	 * handler.
	 */
func plat_reset_handler
	mov	x0, RK_PLAT_AARCH_CFG
	JUMP_TO_HANDLER_IF_RK_R(0)
	JUMP_TO_HANDLER_IF_RK_R(1)

	/* SOC type is not supported */
not_supported:
	b	not_supported
endfunc plat_reset_handler

func plat_my_core_pos
	mrs	x0, mpidr_el1
	and	x1, x0, #MPIDR_CPU_MASK
	and	x0, x0, #MPIDR_CLUSTER_MASK
	add	x0, x1, x0, LSR #6
	ret
endfunc plat_my_core_pos

	/* --------------------------------------------------------------------
	 * void plat_secondary_cold_boot_setup(void);
	 *
	 * This function performs any platform specific actions
	 * needed for a secondary cpu after a cold reset e.g
	 * mark the cpu's presence, mechanism to place it in a
	 * holding pen etc.
	 * --------------------------------------------------------------------
	 */
func plat_secondary_cold_boot_setup
	/* rk3368 does not do cold boot for secondary CPU */
cb_panic:
	b	cb_panic
endfunc plat_secondary_cold_boot_setup

func platform_is_primary_cpu
	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
	cmp	x0, #PLAT_RK_PRIMARY_CPU
	cset	x0, eq
	ret
endfunc platform_is_primary_cpu

	/* --------------------------------------------------------------------
	 * int plat_crash_console_init(void)
	 * Function to initialize the crash console
	 * without a C Runtime to print crash report.
	 * Clobber list : x0, x1, x2
	 * --------------------------------------------------------------------
	 */
func plat_crash_console_init
	mov_imm	x0, PLAT_RK_UART_BASE
	mov_imm	x1, PLAT_RK_UART_CLOCK
	mov_imm	x2, PLAT_RK_UART_BAUDRATE
	b	console_core_init
endfunc plat_crash_console_init

	/* --------------------------------------------------------------------
	 * int plat_crash_console_putc(void)
	 * Function to print a character on the crash
	 * console without a C Runtime.
	 * Clobber list : x1, x2
	 * --------------------------------------------------------------------
	 */
func plat_crash_console_putc
	mov_imm	x1, PLAT_RK_UART_BASE
	b	console_core_putc
endfunc plat_crash_console_putc

	/* --------------------------------------------------------------------
	 * void platform_cpu_warmboot(void);
	 * cpus online or resume entry point
	 * --------------------------------------------------------------------
	 */
func platform_cpu_warmboot
	mrs	x0, MPIDR_EL1
	and	x1, x0, #MPIDR_CPU_MASK
	and	x0, x0, #MPIDR_CLUSTER_MASK
	/* --------------------------------------------------------------------
	 * big cluster id is 1
	 * big cores id is from 0-3, little cores id 4-7
	 * --------------------------------------------------------------------
	 */
	add	x0, x1, x0, lsr #6
	/* --------------------------------------------------------------------
	 * get per-cpu up flag
	 * --------------------------------------------------------------------
	 */
	adr	x4, cpuson_flags
	add	x4, x4, x0, lsl #2
	ldr	w1, [x4]
	/* --------------------------------------------------------------------
	 * get per-cpu boot addr
	 * --------------------------------------------------------------------
	 */
	adr	x5, cpuson_entry_point
	ldr	x2, [x5, x0, lsl #3]
	/* --------------------------------------------------------------------
	 * check cpu on reason
	 * --------------------------------------------------------------------
	 */
	ldr	w3, =PMU_CPU_AUTO_PWRDN
	cmp	w1, w3
	b.eq	boot_entry
	ldr	w3, =PMU_CPU_HOTPLUG
	cmp	w1, w3
	b.eq	boot_entry
	/* --------------------------------------------------------------------
	 * If the boot core cpuson_flags or cpuson_entry_point is not as
	 * expected, force the core into wfe.
	 * --------------------------------------------------------------------
	 */
wfe_loop:
	wfe
	b	wfe_loop
boot_entry:
	mov	w0, #0
	str	w0, [x4]
	br	x2
endfunc platform_cpu_warmboot

	/* --------------------------------------------------------------------
	 * Per-CPU Secure entry point - resume or power up
	 * --------------------------------------------------------------------
	 */
	.section tzfw_coherent_mem, "a"
	.align	3
cpuson_entry_point:
	.rept	PLATFORM_CORE_COUNT
	.quad	0
	.endr
cpuson_flags:
	.rept	PLATFORM_CORE_COUNT
	.quad	0
	.endr
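Note: the cpuson_flags and cpuson_entry_point slots above are the hand-off between the SoC power-management code and this warm-boot path. A minimal sketch of how PMU code might arm them before powering a core on (illustrative only; the actual rk3368 PMU driver is not part of this file, and cpu and sec_entrypoint are assumed names):

/* Arm the warm-boot slot for core 'cpu' before asking the PMU to power it on. */
cpuson_flags[cpu] = PMU_CPU_HOTPLUG;                 /* reason checked in platform_cpu_warmboot */
cpuson_entry_point[cpu] = (uint64_t)sec_entrypoint;  /* address platform_cpu_warmboot will branch to */
dsb();                                               /* make the slots visible before power-on */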
plat/rockchip/common/aarch64/platform_common.c
0 → 100644
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <arm_gic.h>
#include <bl_common.h>
#include <cci.h>
#include <debug.h>
#include <string.h>
#include <xlat_tables.h>
#include <platform_def.h>
#include <plat_private.h>
#ifdef PLAT_RK_CCI_BASE
static const int cci_map[] = {
	PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX,
	PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX
};
#endif
/******************************************************************************
* Macro generating the code for the function that sets up the page tables as
* per the platform memory map and initializes the MMU, for the given exception level
******************************************************************************/
#define DEFINE_CONFIGURE_MMU_EL(_el) \
void plat_configure_mmu_el ## _el(unsigned long total_base, \
unsigned long total_size, \
unsigned long ro_start, \
unsigned long ro_limit, \
unsigned long coh_start, \
unsigned long coh_limit) \
{ \
mmap_add_region(total_base, total_base, \
total_size, \
MT_MEMORY | MT_RW | MT_SECURE); \
mmap_add_region(ro_start, ro_start, \
ro_limit - ro_start, \
MT_MEMORY | MT_RO | MT_SECURE); \
mmap_add_region(coh_start, coh_start, \
coh_limit - coh_start, \
MT_DEVICE | MT_RW | MT_SECURE); \
mmap_add(plat_rk_mmap); \
init_xlat_tables(); \
\
enable_mmu_el ## _el(0); \
}
/* Define EL3 variants of the function initialising the MMU */
DEFINE_CONFIGURE_MMU_EL(3)

uint64_t plat_get_syscnt_freq(void)
{
	return SYS_COUNTER_FREQ_IN_TICKS;
}

void plat_cci_init(void)
{
#ifdef PLAT_RK_CCI_BASE
	/* Initialize CCI driver */
	cci_init(PLAT_RK_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
#endif
}

void plat_cci_enable(void)
{
	/*
	 * Enable CCI coherency for this cluster.
	 * No need for locks as no other cpu is active at the moment.
	 */
#ifdef PLAT_RK_CCI_BASE
	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
#endif
}

void plat_cci_disable(void)
{
#ifdef PLAT_RK_CCI_BASE
	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
#endif
}
plat/rockchip/common/bl31_plat_setup.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arm_gic.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <mmio.h>
#include <platform.h>
#include <plat_private.h>
#include <platform_def.h>
/*******************************************************************************
* Declarations of linker defined symbols which will help us find the layout
* of trusted SRAM
******************************************************************************/
unsigned long __RO_START__;
unsigned long __RO_END__;
unsigned long __COHERENT_RAM_START__;
unsigned long __COHERENT_RAM_END__;
/*
* The next 2 constants identify the extents of the code & RO data region.
* These addresses are used by the MMU setup code and therefore they must be
* page-aligned. It is the responsibility of the linker script to ensure that
* __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
*/
#define BL31_RO_BASE (unsigned long)(&__RO_START__)
#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
/*
* The next 2 constants identify the extents of the coherent memory region.
* These addresses are used by the MMU setup code and therefore they must be
* page-aligned. It is the responsibility of the linker script to ensure that
* __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
* refer to page-aligned addresses.
*/
#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
static entry_point_info_t bl32_ep_info;
static entry_point_info_t bl33_ep_info;
/*******************************************************************************
* Return a pointer to the 'entry_point_info' structure of the next image for
* the security state specified. BL33 corresponds to the non-secure image type
* while BL32 corresponds to the secure image type. A NULL pointer is returned
* if the image does not exist.
******************************************************************************/
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	next_image_info = (type == NON_SECURE) ? &bl33_ep_info : &bl32_ep_info;

	/* None of the images on this platform can have 0x0 as the entrypoint */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}
/*******************************************************************************
* Perform any BL3-1 early platform setup. Here is an opportunity to copy
* parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
* are lost (potentially). This needs to be done before the MMU is initialized
* so that the memory layout can be used while creating page tables.
* BL2 has flushed this information to memory, so we are guaranteed to pick up
* good data.
******************************************************************************/
void bl31_early_platform_setup(bl31_params_t *from_bl2,
			       void *plat_params_from_bl2)
{
	console_init(PLAT_RK_UART_BASE, PLAT_RK_UART_CLOCK,
		     PLAT_RK_UART_BAUDRATE);

	VERBOSE("bl31_setup\n");

	/* Passing a NULL context is a critical programming error */
	assert(from_bl2);

	assert(from_bl2->h.type == PARAM_BL31);
	assert(from_bl2->h.version >= VERSION_1);

	bl32_ep_info = *from_bl2->bl32_ep_info;
	bl33_ep_info = *from_bl2->bl33_ep_info;

	/*
	 * The code for resuming cpu from suspend must be executed in pmusram.
	 * Copy the code into pmusram.
	 */
	plat_rockchip_pmusram_prepare();
}
/*******************************************************************************
* Perform any BL3-1 platform setup code
******************************************************************************/
void bl31_platform_setup(void)
{
	plat_delay_timer_init();
	plat_rockchip_soc_init();

	/* Initialize the gic cpu and distributor interfaces */
	plat_rockchip_gic_driver_init();
	plat_rockchip_gic_init();
	plat_rockchip_pmu_init();
}
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	plat_cci_init();
	plat_cci_enable();
	plat_configure_mmu_el3(BL31_RO_BASE,
			       (BL31_COHERENT_RAM_LIMIT - BL31_RO_BASE),
			       BL31_RO_BASE,
			       BL31_RO_LIMIT,
			       BL31_COHERENT_RAM_BASE,
			       BL31_COHERENT_RAM_LIMIT);
}
plat/rockchip/common/drivers/pmu/pmu_com.h
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_COM_H__
#define __PMU_COM_H__
DEFINE_BAKERY_LOCK(rockchip_pd_lock);
#define rockchip_pd_lock_get() bakery_lock_get(&rockchip_pd_lock)
#define rockchip_pd_lock_rls() bakery_lock_release(&rockchip_pd_lock)
#define rockchip_pd_lock_init() bakery_lock_init(&rockchip_pd_lock)
/*****************************************************************************
* power domain on or off
*****************************************************************************/
enum pmu_pd_state {
	pmu_pd_on = 0,
	pmu_pd_off = 1
};
#pragma weak plat_ic_get_pending_interrupt_id
#pragma weak pmu_power_domain_ctr
#pragma weak check_cpu_wfie
static inline uint32_t pmu_power_domain_st(uint32_t pd)
{
	uint32_t pwrdn_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd);

	if (pwrdn_st)
		return pmu_pd_off;
	else
		return pmu_pd_on;
}

static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
{
	uint32_t val;
	uint32_t loop = 0;
	int ret = 0;

	rockchip_pd_lock_get();

	val = mmio_read_32(PMU_BASE + PMU_PWRDN_CON);
	if (pd_state == pmu_pd_off)
		val |= BIT(pd);
	else
		val &= ~BIT(pd);

	mmio_write_32(PMU_BASE + PMU_PWRDN_CON, val);
	dsb();

	while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
		udelay(1);
		loop++;
	}

	if (pmu_power_domain_st(pd) != pd_state) {
		WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
		ret = -EINVAL;
	}

	rockchip_pd_lock_rls();

	return ret;
}

static int check_cpu_wfie(uint32_t cpu_id, uint32_t wfie_msk)
{
	uint32_t cluster_id, loop = 0;

	if (cpu_id >= PLATFORM_CLUSTER0_CORE_COUNT) {
		cluster_id = 1;
		cpu_id -= PLATFORM_CLUSTER0_CORE_COUNT;
	} else {
		cluster_id = 0;
	}

	if (cluster_id)
		wfie_msk <<= (clstb_cpu_wfe + cpu_id);
	else
		wfie_msk <<= (clstl_cpu_wfe + cpu_id);

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) &&
	       (loop < CHK_CPU_LOOP)) {
		udelay(1);
		loop++;
	}

	if ((mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) == 0) {
		WARN("%s: %d, %d, %d, error!\n", __func__,
		     cluster_id, cpu_id, wfie_msk);
		return -EINVAL;
	}

	return 0;
}
#endif
/* __PMU_COM_H__ */
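For orientation, a hedged usage sketch of the helpers above (pd_bit stands for the SoC-specific bit index of the target domain in PMU_PWRDN_CON/PMU_PWRDN_ST and is not defined in this header):

/* Request that a power domain be switched off and wait for the state change. */
if (pmu_power_domain_ctr(pd_bit, pmu_pd_off) != 0)
	WARN("%s: power domain did not turn off\n", __func__);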
plat/rockchip/common/include/plat_macros.S
0 → 100644
/*
 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __ROCKCHIP_PLAT_MACROS_S__
#define __ROCKCHIP_PLAT_MACROS_S__
#include <cci.h>
#include <gic_common.h>
#include <gicv2.h>
#include <gicv3.h>
#include <platform_def.h>
.section .rodata.gic_reg_name, "aS"
/* Applicable only to GICv2 and GICv3 with SRE disabled (legacy mode) */
gicc_regs:
	.asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""

/* Applicable only to GICv3 with SRE enabled */
icc_regs:
	.asciz "icc_hppir0_el1", "icc_hppir1_el1", "icc_ctlr_el3", ""

/* Registers common to both GICv2 and GICv3 */
gicd_pend_reg:
	.asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n"	\
		" Offset:\t\t\tvalue\n"
newline:
	.asciz "\n"
spacer:
	.asciz ":\t\t0x"

	/* ---------------------------------------------
	 * The below utility macro prints out relevant GIC
	 * registers whenever an unhandled exception is
	 * taken in BL31 on ARM standard platforms.
	 * Expects: GICD base in x16, GICC base in x17
	 * Clobbers: x0 - x10, sp
	 * ---------------------------------------------
	 */
	.macro plat_print_gic_regs
	mov_imm	x16, PLAT_RK_GICD_BASE
	mov_imm	x17, PLAT_RK_GICC_BASE
	/* Check for GICv3 system register access */
	mrs	x7, id_aa64pfr0_el1
	ubfx	x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH
	cmp	x7, #1
	b.ne	print_gicv2

	/* Check for SRE enable */
	mrs	x8, ICC_SRE_EL3
	tst	x8, #ICC_SRE_SRE_BIT
	b.eq	print_gicv2

	/* Load the icc reg list to x6 */
	adr	x6, icc_regs
	/* Load the icc regs to gp regs used by str_in_crash_buf_print */
	mrs	x8, ICC_HPPIR0_EL1
	mrs	x9, ICC_HPPIR1_EL1
	mrs	x10, ICC_CTLR_EL3
	/* Store to the crash buf and print to console */
	bl	str_in_crash_buf_print
	b	print_gic_common

print_gicv2:
	/* Load the gicc reg list to x6 */
	adr	x6, gicc_regs
	/* Load the gicc regs to gp regs used by str_in_crash_buf_print */
	ldr	w8, [x17, #GICC_HPPIR]
	ldr	w9, [x17, #GICC_AHPPIR]
	ldr	w10, [x17, #GICC_CTLR]
	/* Store to the crash buf and print to console */
	bl	str_in_crash_buf_print

print_gic_common:
	/* Print the GICD_ISPENDR regs */
	add	x7, x16, #GICD_ISPENDR
	adr	x4, gicd_pend_reg
	bl	asm_print_str
gicd_ispendr_loop:
	sub	x4, x7, x16
	cmp	x4, #0x280
	b.eq	exit_print_gic_regs
	bl	asm_print_hex

	adr	x4, spacer
	bl	asm_print_str

	ldr	x4, [x7], #8
	bl	asm_print_hex

	adr	x4, newline
	bl	asm_print_str
	b	gicd_ispendr_loop
exit_print_gic_regs:
	.endm

.section .rodata.cci_reg_name, "aS"
cci_iface_regs:
	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1" , ""

	/* ------------------------------------------------
	 * The below macro prints out relevant interconnect
	 * registers whenever an unhandled exception is
	 * taken in BL3-1.
	 * Clobbers: x0 - x9, sp
	 * ------------------------------------------------
	 */
	.macro plat_print_interconnect_regs
#if PLATFORM_CLUSTER_COUNT > 1
	adr	x6, cci_iface_regs
	/* Store in x7 the base address of the first interface */
	mov_imm	x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET(	\
			PLAT_RK_CCI_CLUSTER0_SL_IFACE_IX))
	ldr	w8, [x7, #SNOOP_CTRL_REG]
	/* Store in x7 the base address of the second interface */
	mov_imm	x7, (PLAT_RK_CCI_BASE + SLAVE_IFACE_OFFSET(	\
			PLAT_RK_CCI_CLUSTER1_SL_IFACE_IX))
	ldr	w9, [x7, #SNOOP_CTRL_REG]
	/* Store to the crash buf and print to console */
	bl	str_in_crash_buf_print
#endif
	.endm
#endif /* __ROCKCHIP_PLAT_MACROS_S__ */
plat/rockchip/common/include/plat_private.h
0 → 100644
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PLAT_PRIVATE_H__
#define __PLAT_PRIVATE_H__
#ifndef __ASSEMBLY__
#include <mmio.h>
#include <stdint.h>
#include <xlat_tables.h>
/******************************************************************************
* For rockchip socs pm ops
******************************************************************************/
struct rockchip_pm_ops_cb {
	int (*cores_pwr_dm_on)(unsigned long mpidr, uint64_t entrypoint);
	int (*cores_pwr_dm_off)(void);
	int (*cores_pwr_dm_on_finish)(void);
	int (*cores_pwr_dm_suspend)(void);
	int (*cores_pwr_dm_resume)(void);
	int (*sys_pwr_dm_suspend)(void);
	int (*sys_pwr_dm_resume)(void);
	void (*sys_gbl_soft_reset)(void) __dead2;
	void (*system_off)(void) __dead2;
};
/******************************************************************************
 * These registers have write-mask bits: to change a bit you must also set the
 * corresponding write-mask bit at the same time.
 * The write-mask bits live in the high 16 bits of the register.
 * The following macro definitions help access write-mask registers efficiently.
 ******************************************************************************/
#define REG_MSK_SHIFT 16
#ifndef BIT
#define BIT(nr) (1 << (nr))
#endif
#ifndef WMSK_BIT
#define WMSK_BIT(nr) BIT((nr) + REG_MSK_SHIFT)
#endif
/* set one bit with write mask */
#ifndef BIT_WITH_WMSK
#define BIT_WITH_WMSK(nr) (BIT(nr) | WMSK_BIT(nr))
#endif
#ifndef BITS_SHIFT
#define BITS_SHIFT(bits, shift) (bits << (shift))
#endif
#ifndef BITS_WITH_WMASK
#define BITS_WITH_WMASK(msk, bits, shift)\
(BITS_SHIFT(bits, shift) | BITS_SHIFT(msk, (shift + REG_MSK_SHIFT)))
#endif
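A worked example of the write-mask convention described above (illustrative values only; some_wmsk_reg is a placeholder, not a real register name):

/*
 * BITS_WITH_WMASK(0x3, 0x1, 4) == (0x1 << 4) | (0x3 << 20) == 0x00300010:
 * the low half writes binary 01 into bits [5:4], and the high half sets the
 * matching write-mask bits [21:20] so the hardware latches both bit updates.
 */
mmio_write_32(some_wmsk_reg, BITS_WITH_WMASK(0x3, 0x1, 4));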
/******************************************************************************
* Function and variable prototypes
*****************************************************************************/
void plat_configure_mmu_el3(unsigned long total_base,
			    unsigned long total_size,
			    unsigned long,
			    unsigned long,
			    unsigned long,
			    unsigned long);

void plat_cci_init(void);
void plat_cci_enable(void);
void plat_cci_disable(void);

void plat_delay_timer_init(void);

void plat_rockchip_gic_driver_init(void);
void plat_rockchip_gic_init(void);
void plat_rockchip_gic_cpuif_enable(void);
void plat_rockchip_gic_cpuif_disable(void);
void plat_rockchip_gic_pcpu_init(void);

void plat_rockchip_pmusram_prepare(void);
void plat_rockchip_pmu_init(void);
void plat_rockchip_soc_init(void);
void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops);

extern const unsigned char rockchip_power_domain_tree_desc[];

extern void *pmu_cpuson_entrypoint_start;
extern void *pmu_cpuson_entrypoint_end;
extern uint64_t cpuson_entry_point[PLATFORM_CORE_COUNT];
extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];

extern const mmap_region_t plat_rk_mmap[];
#endif
/* __ASSEMBLY__ */
/* only Cortex-A53 */
#define RK_PLAT_CFG0 0
/* include Cortex-A72 */
#define RK_PLAT_CFG1 1
#endif
/* __PLAT_PRIVATE_H__ */
plat/rockchip/common/plat_delay_timer.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <delay_timer.h>
#include <platform_def.h>
static uint32_t plat_get_timer_value(void)
{
	/*
	 * Generic delay timer implementation expects the timer to be a down
	 * counter. We apply bitwise NOT operator to the tick values returned
	 * by read_cntpct_el0() to simulate the down counter.
	 */
	return (uint32_t)(~read_cntpct_el0());
}

static const timer_ops_t plat_timer_ops = {
	.get_timer_value	= plat_get_timer_value,
	.clk_mult		= 1,
	.clk_div		= SYS_COUNTER_FREQ_IN_MHZ,
};

void plat_delay_timer_init(void)
{
	timer_init(&plat_timer_ops);
}
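A short note on the trick above: CNTPCT_EL0 counts up, so ~CNTPCT_EL0 decreases monotonically, which is what the generic delay-timer driver's elapsed-time arithmetic expects. A minimal usage sketch, assuming the generic delay_timer API that the rest of this patch relies on (e.g. the udelay() calls in pmu_com.h):

plat_delay_timer_init();	/* registers plat_timer_ops with the generic driver */
udelay(10);			/* busy-waits roughly 10 microseconds */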
plat/rockchip/common/plat_pm.c
0 → 100644
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <console.h>
#include <errno.h>
#include <debug.h>
#include <psci.h>
#include <delay_timer.h>
#include <platform_def.h>
#include <plat_private.h>
/* Macros to read the rk power domain state */
#define RK_CORE_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define RK_CLUSTER_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define RK_SYSTEM_PWR_STATE(state) \
((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
static uintptr_t rockchip_sec_entrypoint;

static struct rockchip_pm_ops_cb *rockchip_ops;

static void plat_rockchip_sys_pwr_domain_resume(void)
{
	plat_rockchip_gic_init();
	if (rockchip_ops && rockchip_ops->sys_pwr_dm_resume)
		rockchip_ops->sys_pwr_dm_resume();
}

static void plat_rockchip_cores_pwr_domain_resume(void)
{
	if (rockchip_ops && rockchip_ops->cores_pwr_dm_resume)
		rockchip_ops->cores_pwr_dm_resume();

	/* Enable the gic cpu interface */
	plat_rockchip_gic_pcpu_init();

	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_rockchip_gic_cpuif_enable();
}
/*******************************************************************************
* Rockchip standard platform handler called to check the validity of the power
* state parameter.
******************************************************************************/
int rockchip_validate_power_state(unsigned int power_state,
				  psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0;
		 * ignore any other power level.
		 */
		if (pwr_lvl != MPIDR_AFFLVL0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
					PLAT_MAX_RET_STATE;
	} else {
		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					PLAT_MAX_OFF_STATE;
	}

	/* We expect the 'state id' to be zero */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
void rockchip_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	int i;

	for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}
/*******************************************************************************
* RockChip handler called when a CPU is about to enter standby.
******************************************************************************/
void rockchip_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	assert(cpu_state == PLAT_MAX_RET_STATE);

	scr = read_scr_el3();
	/* Enable PhysicalIRQ bit for NS world to wake the CPU */
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR to the original value, synchronisation of scr_el3 is
	 * done by eret while el3_exit to save some execution cycles.
	 */
	write_scr_el3(scr);
}
/*******************************************************************************
* RockChip handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
int rockchip_pwr_domain_on(u_register_t mpidr)
{
	if (rockchip_ops && rockchip_ops->cores_pwr_dm_on)
		rockchip_ops->cores_pwr_dm_on(mpidr, rockchip_sec_entrypoint);

	return PSCI_E_SUCCESS;
}
/*******************************************************************************
* RockChip handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void rockchip_pwr_domain_off(const psci_power_state_t *target_state)
{
	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);

	plat_rockchip_gic_cpuif_disable();

	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		plat_cci_disable();

	if (rockchip_ops && rockchip_ops->cores_pwr_dm_off)
		rockchip_ops->cores_pwr_dm_off();
}
/*******************************************************************************
* RockChip handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void rockchip_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
		return;

	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);

	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		if (rockchip_ops && rockchip_ops->sys_pwr_dm_suspend)
			rockchip_ops->sys_pwr_dm_suspend();
	} else {
		if (rockchip_ops && rockchip_ops->cores_pwr_dm_suspend)
			rockchip_ops->cores_pwr_dm_suspend();
	}

	/* Prevent interrupts from spuriously waking up this cpu */
	plat_rockchip_gic_cpuif_disable();

	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		plat_cci_disable();
}
/*******************************************************************************
* RockChip handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);

	if (rockchip_ops && rockchip_ops->cores_pwr_dm_on_finish)
		rockchip_ops->cores_pwr_dm_on_finish();

	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	/* Enable the gic cpu interface */
	plat_rockchip_gic_pcpu_init();

	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_rockchip_gic_cpuif_enable();
}
/*******************************************************************************
* RockChip handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	/* Nothing to be done on waking up from retention from CPU level */
	if (RK_CORE_PWR_STATE(target_state) == PLAT_MAX_RET_STATE)
		return;

	/* Perform system domain restore if woken up from system suspend */
	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		plat_rockchip_sys_pwr_domain_resume();
	else
		plat_rockchip_cores_pwr_domain_resume();

	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}
}
/*******************************************************************************
* RockChip handlers to reboot the system
******************************************************************************/
static void __dead2 rockchip_system_reset(void)
{
	assert(rockchip_ops && rockchip_ops->sys_gbl_soft_reset);

	rockchip_ops->sys_gbl_soft_reset();
}
/*******************************************************************************
* Export the platform handlers via plat_rockchip_psci_pm_ops. The rockchip standard
* platform layer will take care of registering the handlers with PSCI.
******************************************************************************/
const plat_psci_ops_t plat_rockchip_psci_pm_ops = {
	.cpu_standby = rockchip_cpu_standby,
	.pwr_domain_on = rockchip_pwr_domain_on,
	.pwr_domain_off = rockchip_pwr_domain_off,
	.pwr_domain_suspend = rockchip_pwr_domain_suspend,
	.pwr_domain_on_finish = rockchip_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = rockchip_pwr_domain_suspend_finish,
	.system_reset = rockchip_system_reset,
	.validate_power_state = rockchip_validate_power_state,
	.get_sys_suspend_power_state = rockchip_get_sys_suspend_power_state
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_rockchip_psci_pm_ops;
	rockchip_sec_entrypoint = sec_entrypoint;
	return 0;
}

void plat_setup_rockchip_pm_ops(struct rockchip_pm_ops_cb *ops)
{
	rockchip_ops = ops;
}
plat/rockchip/common/plat_topology.c
0 → 100644
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <platform_def.h>
#include <plat_private.h>
#include <psci.h>
/*******************************************************************************
* This function returns the RockChip default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
	return rockchip_power_domain_tree_desc;
}

int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	unsigned int cluster_id, cpu_id;

	cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	cluster_id = MPIDR_AFFLVL1_VAL(mpidr);

	if (cluster_id >= PLATFORM_CLUSTER_COUNT)
		return -1;

	return ((cluster_id * PLATFORM_CLUSTER0_CORE_COUNT) + cpu_id);
}
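A worked example of the mapping above, assuming PLATFORM_CLUSTER0_CORE_COUNT is 4 (the actual value is SoC-specific and not defined in this file): an MPIDR with Aff1 (cluster) = 1 and Aff0 (cpu) = 2 maps to 1 * 4 + 2 = core position 6.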
plat/rockchip/common/pmusram/pmu_sram.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform.h>
/*****************************************************************************
 * sram only supports 32-bit access
 ******************************************************************************/
void u32_align_cpy(uint32_t *dst, const uint32_t *src, size_t bytes)
{
	uint32_t i;

	for (i = 0; i < bytes; i++)
		dst[i] = src[i];
}
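Note that the loop above indexes uint32_t elements, so the third argument is effectively a count of 32-bit words rather than bytes. A hedged sketch of a caller (the real plat_rockchip_pmusram_prepare() lives in the SoC-specific code and is not part of this diff; PMUSRAM_BASE is assumed here):

/* Copy the warm-boot stub into PMUSRAM using 32-bit accesses only. */
uint32_t *dst = (uint32_t *)PMUSRAM_BASE;
uint32_t *src = (uint32_t *)&pmu_cpuson_entrypoint_start;
size_t size = (uintptr_t)&pmu_cpuson_entrypoint_end -
	      (uintptr_t)&pmu_cpuson_entrypoint_start;

u32_align_cpy(dst, src, size / sizeof(uint32_t));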
plat/rockchip/common/pmusram/pmu_sram.h
0 → 100644
/* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_SRAM_H__
#define __PMU_SRAM_H__
/*****************************************************************************
* cpu up status
*****************************************************************************/
#define PMU_SYS_SLP_MODE 0xa5
#define PMU_SYS_ON_MODE 0x0
/*****************************************************************************
* define data offset in struct psram_data
*****************************************************************************/
#define PSRAM_DT_SP 0x0
#define PSRAM_DT_DDR_FUNC 0x8
#define PSRAM_DT_DDR_DATA 0x10
#define PSRAM_DT_DDRFLAG 0x18
#define PSRAM_DT_SYS_MODE 0x1c
#define PSRAM_DT_MPIDR 0x20
#define PSRAM_DT_END 0x24
/******************************************************************************
* Allocate data region for struct psram_data_t in pmusram
******************************************************************************/
/* Needed aligned 16 bytes for sp stack top */
#define PSRAM_DT_SIZE (((PSRAM_DT_END + 16) / 16) * 16)
#define PSRAM_DT_BASE ((PMUSRAM_BASE + PMUSRAM_RSIZE) - PSRAM_DT_SIZE)
#define PSRAM_SP_TOP PSRAM_DT_BASE
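Worked through with the offsets above: PSRAM_DT_END is 0x24 (36 bytes), so PSRAM_DT_SIZE = ((0x24 + 16) / 16) * 16 = 0x30, i.e. the data region is rounded up to 48 bytes, keeping PSRAM_DT_BASE (and therefore PSRAM_SP_TOP) 16-byte aligned for use as a stack pointer, assuming PMUSRAM_BASE + PMUSRAM_RSIZE is itself 16-byte aligned.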
#ifndef __ASSEMBLY__
/*
* The struct is used in pmu_cpus_on.S which
* gets the data of the struct by the following index
* #define PSRAM_DT_SP 0x0
* #define PSRAM_DT_DDR_FUNC 0x8
* #define PSRAM_DT_DDR_DATA 0x10
* #define PSRAM_DT_DDRFLAG 0x18
* #define PSRAM_DT_SYS_MODE 0x1c
* #define PSRAM_DT_MPIDR 0x20
*/
struct psram_data_t {
	uint64_t sp;
	uint64_t ddr_func;
	uint64_t ddr_data;
	uint32_t ddr_flag;
	uint32_t sys_mode;
	uint32_t boot_mpidr;
};

CASSERT(sizeof(struct psram_data_t) <= PSRAM_DT_SIZE,
	assert_psram_dt_size_mismatch);
CASSERT(__builtin_offsetof(struct psram_data_t, sp) == PSRAM_DT_SP,
	assert_psram_dt_sp_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, ddr_func) == PSRAM_DT_DDR_FUNC,
	assert_psram_dt_ddr_func_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, ddr_data) == PSRAM_DT_DDR_DATA,
	assert_psram_dt_ddr_data_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, ddr_flag) == PSRAM_DT_DDRFLAG,
	assert_psram_dt_ddr_flag_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, sys_mode) == PSRAM_DT_SYS_MODE,
	assert_psram_dt_sys_mode_offset_mistmatch);
CASSERT(__builtin_offsetof(struct psram_data_t, boot_mpidr) == PSRAM_DT_MPIDR,
	assert_psram_dt_mpidr_offset_mistmatch);

void u32_align_cpy(uint32_t *dst, const uint32_t *src, size_t bytes);
#endif
/* __ASSEMBLY__ */
#endif
plat/rockchip/common/pmusram/pmu_sram_cpus_on.S
0 → 100644
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>
#include <pmu_sram.h>
	.globl	pmu_cpuson_entrypoint_start
	.globl	pmu_cpuson_entrypoint_end

func pmu_cpuson_entrypoint
pmu_cpuson_entrypoint_start:
	ldr	x5, psram_data
	ldr	w0, [x5, #PSRAM_DT_SYS_MODE]
	cmp	w0, #PMU_SYS_SLP_MODE
	b.eq	check_wake_cpus
	ldr	x6, warm_boot_func
	br	x6
check_wake_cpus:
	mrs	x0, MPIDR_EL1
	and	x1, x0, #MPIDR_CPU_MASK
	and	x0, x0, #MPIDR_CLUSTER_MASK
	orr	x0, x0, x1
	/* primary_cpu */
	ldr	w1, [x5, #PSRAM_DT_MPIDR]
	cmp	w0, w1
	b.eq	sys_wakeup
	/*
	 * If the core is not the primary cpu,
	 * force the core into wfe.
	 */
wfe_loop:
	wfe
	b	wfe_loop
sys_wakeup:
	/* check ddr flag for resume ddr */
	ldr	w2, [x5, #PSRAM_DT_DDRFLAG]
	cmp	w2, #0x0
	b.eq	sys_resume
ddr_resume:
	ldr	x2, [x5, #PSRAM_DT_SP]
	mov	sp, x2
	ldr	x1, [x5, #PSRAM_DT_DDR_FUNC]
	ldr	x0, [x5, #PSRAM_DT_DDR_DATA]
	blr	x1
sys_resume:
	ldr	x1, sys_wakeup_entry
	br	x1
	.align	3
psram_data:
	.quad	PSRAM_DT_BASE
warm_boot_func:
	.quad	platform_cpu_warmboot
sys_wakeup_entry:
	.quad	psci_entrypoint
pmu_cpuson_entrypoint_end:
	.word	0
endfunc pmu_cpuson_entrypoint
plat/rockchip/common/rockchip_gicv2.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl_common.h>
#include <gicv2.h>
#include <platform_def.h>
/******************************************************************************
* The following functions are defined as weak to allow a platform to override
* the way the GICv2 driver is initialised and used.
*****************************************************************************/
#pragma weak plat_rockchip_gic_driver_init
#pragma weak plat_rockchip_gic_init
#pragma weak plat_rockchip_gic_cpuif_enable
#pragma weak plat_rockchip_gic_cpuif_disable
#pragma weak plat_rockchip_gic_pcpu_init
/******************************************************************************
* On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
* interrupts.
*****************************************************************************/
const unsigned int g0_interrupt_array[] = {
	PLAT_RK_G1S_IRQS,
};
/*
* Ideally `rockchip_gic_data` structure definition should be a `const` but it
* is kept as modifiable for overwriting with different GICD and GICC base when
* running on FVP with VE memory map.
*/
gicv2_driver_data_t rockchip_gic_data = {
	.gicd_base = PLAT_RK_GICD_BASE,
	.gicc_base = PLAT_RK_GICC_BASE,
	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
	.g0_interrupt_array = g0_interrupt_array,
};
/******************************************************************************
* RockChip common helper to initialize the GICv2 only driver.
*****************************************************************************/
void plat_rockchip_gic_driver_init(void)
{
	gicv2_driver_init(&rockchip_gic_data);
}

void plat_rockchip_gic_init(void)
{
	gicv2_distif_init();
	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();
}

/******************************************************************************
 * RockChip common helper to enable the GICv2 CPU interface
 *****************************************************************************/
void plat_rockchip_gic_cpuif_enable(void)
{
	gicv2_cpuif_enable();
}

/******************************************************************************
 * RockChip common helper to disable the GICv2 CPU interface
 *****************************************************************************/
void plat_rockchip_gic_cpuif_disable(void)
{
	gicv2_cpuif_disable();
}

/******************************************************************************
 * RockChip common helper to initialize the per cpu distributor interface
 * in GICv2
 *****************************************************************************/
void plat_rockchip_gic_pcpu_init(void)
{
	gicv2_pcpu_distif_init();
}
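For context (not part of the patch itself): a plausible way a BL31 port drives these helpers, assuming the usual split between cold-boot platform setup and the PSCI power paths. The caller names below are illustrative and are not taken from this commit:

	/* Illustrative only */
	void bl31_platform_setup_example(void)
	{
		plat_rockchip_gic_driver_init();	/* bind GICD/GICC base addresses */
		plat_rockchip_gic_init();		/* distributor + boot cpu's interface */
	}

	/* When a secondary core is brought up through PSCI CPU_ON: */
	void secondary_core_gic_setup_example(void)
	{
		plat_rockchip_gic_pcpu_init();		/* per-cpu distributor state */
		plat_rockchip_gic_cpuif_enable();	/* then its cpu interface */
	}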
plat/rockchip/common/rockchip_gicv3.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl_common.h>
#include <gicv3.h>
#include <platform.h>
#include <platform_def.h>
/******************************************************************************
* The following functions are defined as weak to allow a platform to override
* the way the GICv3 driver is initialised and used.
*****************************************************************************/
#pragma weak plat_rockchip_gic_driver_init
#pragma weak plat_rockchip_gic_init
#pragma weak plat_rockchip_gic_cpuif_enable
#pragma weak plat_rockchip_gic_cpuif_disable
#pragma weak plat_rockchip_gic_pcpu_init
/* The GICv3 driver only needs to be initialized in EL3 */
uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];

/* Array of Group1 secure interrupts to be configured by the gic driver */
const unsigned int g1s_interrupt_array[] = {
	PLAT_RK_G1S_IRQS
};

/* Array of Group0 interrupts to be configured by the gic driver */
const unsigned int g0_interrupt_array[] = {
	PLAT_RK_G0_IRQS
};

static unsigned int plat_rockchip_mpidr_to_core_pos(unsigned long mpidr)
{
	return (unsigned int)plat_core_pos_by_mpidr(mpidr);
}

const gicv3_driver_data_t rockchip_gic_data = {
	.gicd_base = PLAT_RK_GICD_BASE,
	.gicr_base = PLAT_RK_GICR_BASE,
	.g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
	.g1s_interrupt_num = ARRAY_SIZE(g1s_interrupt_array),
	.g0_interrupt_array = g0_interrupt_array,
	.g1s_interrupt_array = g1s_interrupt_array,
	.rdistif_num = PLATFORM_CORE_COUNT,
	.rdistif_base_addrs = rdistif_base_addrs,
	.mpidr_to_core_pos = plat_rockchip_mpidr_to_core_pos,
};
void plat_rockchip_gic_driver_init(void)
{
/*
* The GICv3 driver is initialized in EL3 and does not need
* to be initialized again in SEL1. This is because the S-EL1
* can use GIC system registers to manage interrupts and does
* not need GIC interface base addresses to be configured.
*/
#if IMAGE_BL31
	gicv3_driver_init(&rockchip_gic_data);
#endif
}
/******************************************************************************
* RockChip common helper to initialize the GIC. Only invoked
* by BL31
*****************************************************************************/
void plat_rockchip_gic_init(void)
{
	gicv3_distif_init();
	gicv3_rdistif_init(plat_my_core_pos());
	gicv3_cpuif_enable(plat_my_core_pos());
}

/******************************************************************************
 * RockChip common helper to enable the GIC CPU interface
 *****************************************************************************/
void plat_rockchip_gic_cpuif_enable(void)
{
	gicv3_cpuif_enable(plat_my_core_pos());
}

/******************************************************************************
 * RockChip common helper to disable the GIC CPU interface
 *****************************************************************************/
void plat_rockchip_gic_cpuif_disable(void)
{
	gicv3_cpuif_disable(plat_my_core_pos());
}

/******************************************************************************
 * RockChip common helper to initialize the per-cpu redistributor interface
 * in GICv3
 *****************************************************************************/
void plat_rockchip_gic_pcpu_init(void)
{
	gicv3_rdistif_init(plat_my_core_pos());
}
plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <mmio.h>
#include <ddr_rk3368.h>
#include <debug.h>
#include <stdint.h>
#include <string.h>
#include <platform_def.h>
#include <pmu.h>
#include <rk3368_def.h>
#include <soc.h>
/* GRF_SOC_STATUS0 */
#define DPLL_LOCK (0x1 << 2)
/* GRF_DDRC0_CON0 */
#define GRF_DDR_16BIT_EN (((0x1 << 3) << 16) | (0x1 << 3))
#define GRF_DDR_32BIT_EN (((0x1 << 3) << 16) | (0x0 << 3))
#define GRF_MOBILE_DDR_EN (((0x1 << 4) << 16) | (0x1 << 4))
#define GRF_MOBILE_DDR_DISB (((0x1 << 4) << 16) | (0x0 << 4))
#define GRF_DDR3_EN (((0x1 << 2) << 16) | (0x1 << 2))
#define GRF_LPDDR2_3_EN (((0x1 << 2) << 16) | (0x0 << 2))
/* PMUGRF_SOC_CON0 */
#define ddrphy_bufferen_io_en(n) ((0x1 << (9 + 16)) | (n << 9))
#define ddrphy_bufferen_core_en(n) ((0x1 << (8 + 16)) | (n << 8))
struct PCTRL_TIMING_TAG {
	uint32_t ddrfreq;
	uint32_t TOGCNT1U;
	uint32_t TINIT;
	uint32_t TRSTH;
	uint32_t TOGCNT100N;
	uint32_t TREFI;
	uint32_t TMRD;
	uint32_t TRFC;
	uint32_t TRP;
	uint32_t TRTW;
	uint32_t TAL;
	uint32_t TCL;
	uint32_t TCWL;
	uint32_t TRAS;
	uint32_t TRC;
	uint32_t TRCD;
	uint32_t TRRD;
	uint32_t TRTP;
	uint32_t TWR;
	uint32_t TWTR;
	uint32_t TEXSR;
	uint32_t TXP;
	uint32_t TXPDLL;
	uint32_t TZQCS;
	uint32_t TZQCSI;
	uint32_t TDQS;
	uint32_t TCKSRE;
	uint32_t TCKSRX;
	uint32_t TCKE;
	uint32_t TMOD;
	uint32_t TRSTL;
	uint32_t TZQCL;
	uint32_t TMRR;
	uint32_t TCKESR;
	uint32_t TDPD;
	uint32_t TREFI_MEM_DDR3;
};

struct MSCH_SAVE_REG_TAG {
	uint32_t ddrconf;
	uint32_t ddrtiming;
	uint32_t ddrmode;
	uint32_t readlatency;
	uint32_t activate;
	uint32_t devtodev;
};
/* registers that need to be saved for ddr suspend */
struct PCTL_SAVE_REG_TAG {
	uint32_t SCFG;
	uint32_t CMDTSTATEN;
	uint32_t MCFG1;
	uint32_t MCFG;
	uint32_t PPCFG;
	struct PCTRL_TIMING_TAG pctl_timing;
	/* DFI Control Registers */
	uint32_t DFITCTRLDELAY;
	uint32_t DFIODTCFG;
	uint32_t DFIODTCFG1;
	uint32_t DFIODTRANKMAP;
	/* DFI Write Data Registers */
	uint32_t DFITPHYWRDATA;
	uint32_t DFITPHYWRLAT;
	uint32_t DFITPHYWRDATALAT;
	/* DFI Read Data Registers */
	uint32_t DFITRDDATAEN;
	uint32_t DFITPHYRDLAT;
	/* DFI Update Registers */
	uint32_t DFITPHYUPDTYPE0;
	uint32_t DFITPHYUPDTYPE1;
	uint32_t DFITPHYUPDTYPE2;
	uint32_t DFITPHYUPDTYPE3;
	uint32_t DFITCTRLUPDMIN;
	uint32_t DFITCTRLUPDMAX;
	uint32_t DFITCTRLUPDDLY;
	uint32_t DFIUPDCFG;
	uint32_t DFITREFMSKI;
	uint32_t DFITCTRLUPDI;
	/* DFI Status Registers */
	uint32_t DFISTCFG0;
	uint32_t DFISTCFG1;
	uint32_t DFITDRAMCLKEN;
	uint32_t DFITDRAMCLKDIS;
	uint32_t DFISTCFG2;
	/* DFI Low Power Register */
	uint32_t DFILPCFG0;
};
struct DDRPHY_SAVE_REG_TAG {
	uint32_t PHY_REG0;
	uint32_t PHY_REG1;
	uint32_t PHY_REGB;
	uint32_t PHY_REGC;
	uint32_t PHY_REG11;
	uint32_t PHY_REG13;
	uint32_t PHY_REG14;
	uint32_t PHY_REG16;
	uint32_t PHY_REG20;
	uint32_t PHY_REG21;
	uint32_t PHY_REG26;
	uint32_t PHY_REG27;
	uint32_t PHY_REG28;
	uint32_t PHY_REG30;
	uint32_t PHY_REG31;
	uint32_t PHY_REG36;
	uint32_t PHY_REG37;
	uint32_t PHY_REG38;
	uint32_t PHY_REG40;
	uint32_t PHY_REG41;
	uint32_t PHY_REG46;
	uint32_t PHY_REG47;
	uint32_t PHY_REG48;
	uint32_t PHY_REG50;
	uint32_t PHY_REG51;
	uint32_t PHY_REG56;
	uint32_t PHY_REG57;
	uint32_t PHY_REG58;
	uint32_t PHY_REGDLL;
	uint32_t PHY_REGEC;
	uint32_t PHY_REGED;
	uint32_t PHY_REGEE;
	uint32_t PHY_REGEF;
	uint32_t PHY_REGFB;
	uint32_t PHY_REGFC;
	uint32_t PHY_REGFD;
	uint32_t PHY_REGFE;
};
struct BACKUP_REG_TAG {
	uint32_t tag;
	uint32_t pctladdr;
	struct PCTL_SAVE_REG_TAG pctl;
	uint32_t phyaddr;
	struct DDRPHY_SAVE_REG_TAG phy;
	uint32_t nocaddr;
	struct MSCH_SAVE_REG_TAG noc;
	uint32_t pllselect;
	uint32_t phypllockaddr;
	uint32_t phyplllockmask;
	uint32_t phyplllockval;
	uint32_t pllpdstat;
	uint32_t dpllmodeaddr;
	uint32_t dpllslowmode;
	uint32_t dpllnormalmode;
	uint32_t dpllresetaddr;
	uint32_t dpllreset;
	uint32_t dplldereset;
	uint32_t dpllconaddr;
	uint32_t dpllcon[4];
	uint32_t dplllockaddr;
	uint32_t dplllockmask;
	uint32_t dplllockval;
	uint32_t ddrpllsrcdivaddr;
	uint32_t ddrpllsrcdiv;
	uint32_t retendisaddr;
	uint32_t retendisval;
	uint32_t grfregaddr;
	uint32_t grfddrcreg;
	uint32_t crupctlphysoftrstaddr;
	uint32_t cruresetpctlphy;
	uint32_t cruderesetphy;
	uint32_t cruderesetpctlphy;
	uint32_t physoftrstaddr;
	uint32_t endtag;
};
static uint32_t ddr_get_phy_pll_freq(void)
{
	uint32_t ret = 0;
	uint32_t fb_div, pre_div;

	fb_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC);
	fb_div |= (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED) & 0x1) << 8;

	pre_div = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) & 0xff;
	ret = 2 * 24 * fb_div / (4 * pre_div);

	return ret;
}

static void ddr_copy(uint32_t *pdest, uint32_t *psrc, uint32_t words)
{
	uint32_t i;

	for (i = 0; i < words; i++)
		pdest[i] = psrc[i];
}
static void ddr_get_dpll_cfg(uint32_t *p)
{
	uint32_t nmhz, NO, NF, NR;

	nmhz = ddr_get_phy_pll_freq();
	if (nmhz <= 150)
		NO = 6;
	else if (nmhz <= 250)
		NO = 4;
	else if (nmhz <= 500)
		NO = 2;
	else
		NO = 1;

	NR = 1;
	NF = 2 * nmhz * NR * NO / 24;

	p[0] = SET_NR(NR) | SET_NO(NO);
	p[1] = SET_NF(NF);
	p[2] = SET_NB(NF / 2);
}
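Worked example of the arithmetic above (numbers chosen for illustration only): if ddr_get_phy_pll_freq() reports 300, the branch chain selects NO = 2 and NR stays 1, so NF = 2 * 300 * 1 * 2 / 24 = 50. The function then stores SET_NR(1) | SET_NO(2), SET_NF(50) and SET_NB(25) into p[0..2], i.e. the DPLL configuration is rebuilt from the rate the PHY PLL is currently running at; the exact bit encoding comes from the SET_* macros in ddr_rk3368.h.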
void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr)
{
	struct BACKUP_REG_TAG *p_ddr_reg = (struct BACKUP_REG_TAG *)base_addr;
	struct PCTL_SAVE_REG_TAG *pctl_tim = &p_ddr_reg->pctl;

	p_ddr_reg->tag = 0x56313031;
	p_ddr_reg->pctladdr = DDR_PCTL_BASE;
	p_ddr_reg->phyaddr = DDR_PHY_BASE;
	p_ddr_reg->nocaddr = SERVICE_BUS_BASE;

	/* PCTLR */
	ddr_copy((uint32_t *)&pctl_tim->pctl_timing.TOGCNT1U,
		 (uint32_t *)(DDR_PCTL_BASE + DDR_PCTL_TOGCNT1U), 35);
	pctl_tim->pctl_timing.TREFI |= DDR_UPD_REF_ENABLE;

	pctl_tim->SCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_SCFG);
	pctl_tim->CMDTSTATEN = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_CMDTSTATEN);
	pctl_tim->MCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG1);
	pctl_tim->MCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_MCFG);
	pctl_tim->PPCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_PPCFG);
	pctl_tim->pctl_timing.ddrfreq = mmio_read_32(DDR_PCTL_BASE +
						     DDR_PCTL_TOGCNT1U * 2);
	pctl_tim->DFITCTRLDELAY = mmio_read_32(DDR_PCTL_BASE +
					       DDR_PCTL_DFITCTRLDELAY);
	pctl_tim->DFIODTCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIODTCFG);
	pctl_tim->DFIODTCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIODTCFG1);
	pctl_tim->DFIODTRANKMAP = mmio_read_32(DDR_PCTL_BASE +
					       DDR_PCTL_DFIODTRANKMAP);
	pctl_tim->DFITPHYWRDATA = mmio_read_32(DDR_PCTL_BASE +
					       DDR_PCTL_DFITPHYWRDATA);
	pctl_tim->DFITPHYWRLAT = mmio_read_32(DDR_PCTL_BASE +
					      DDR_PCTL_DFITPHYWRLAT);
	pctl_tim->DFITPHYWRDATALAT = mmio_read_32(DDR_PCTL_BASE +
						  DDR_PCTL_DFITPHYWRDATALAT);
	pctl_tim->DFITRDDATAEN = mmio_read_32(DDR_PCTL_BASE +
					      DDR_PCTL_DFITRDDATAEN);
	pctl_tim->DFITPHYRDLAT = mmio_read_32(DDR_PCTL_BASE +
					      DDR_PCTL_DFITPHYRDLAT);
	pctl_tim->DFITPHYUPDTYPE0 = mmio_read_32(DDR_PCTL_BASE +
						 DDR_PCTL_DFITPHYUPDTYPE0);
	pctl_tim->DFITPHYUPDTYPE1 = mmio_read_32(DDR_PCTL_BASE +
						 DDR_PCTL_DFITPHYUPDTYPE1);
	pctl_tim->DFITPHYUPDTYPE2 = mmio_read_32(DDR_PCTL_BASE +
						 DDR_PCTL_DFITPHYUPDTYPE2);
	pctl_tim->DFITPHYUPDTYPE3 = mmio_read_32(DDR_PCTL_BASE +
						 DDR_PCTL_DFITPHYUPDTYPE3);
	pctl_tim->DFITCTRLUPDMIN = mmio_read_32(DDR_PCTL_BASE +
						DDR_PCTL_DFITCTRLUPDMIN);
	pctl_tim->DFITCTRLUPDMAX = mmio_read_32(DDR_PCTL_BASE +
						DDR_PCTL_DFITCTRLUPDMAX);
	pctl_tim->DFITCTRLUPDDLY = mmio_read_32(DDR_PCTL_BASE +
						DDR_PCTL_DFITCTRLUPDDLY);
	pctl_tim->DFIUPDCFG = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFIUPDCFG);
	pctl_tim->DFITREFMSKI = mmio_read_32(DDR_PCTL_BASE +
					     DDR_PCTL_DFITREFMSKI);
	pctl_tim->DFITCTRLUPDI = mmio_read_32(DDR_PCTL_BASE +
					      DDR_PCTL_DFITCTRLUPDI);
	pctl_tim->DFISTCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG0);
	pctl_tim->DFISTCFG1 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG1);
	pctl_tim->DFITDRAMCLKEN = mmio_read_32(DDR_PCTL_BASE +
					       DDR_PCTL_DFITDRAMCLKEN);
	pctl_tim->DFITDRAMCLKDIS = mmio_read_32(DDR_PCTL_BASE +
						DDR_PCTL_DFITDRAMCLKDIS);
	pctl_tim->DFISTCFG2 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFISTCFG2);
	pctl_tim->DFILPCFG0 = mmio_read_32(DDR_PCTL_BASE + DDR_PCTL_DFILPCFG0);

	/* PHY */
	p_ddr_reg->phy.PHY_REG0 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG0);
	p_ddr_reg->phy.PHY_REG1 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG1);
	p_ddr_reg->phy.PHY_REGB = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGB);
	p_ddr_reg->phy.PHY_REGC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGC);
	p_ddr_reg->phy.PHY_REG11 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG11);
	p_ddr_reg->phy.PHY_REG13 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG13);
	p_ddr_reg->phy.PHY_REG14 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG14);
	p_ddr_reg->phy.PHY_REG16 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG16);
	p_ddr_reg->phy.PHY_REG20 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG20);
	p_ddr_reg->phy.PHY_REG21 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG21);
	p_ddr_reg->phy.PHY_REG26 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG26);
	p_ddr_reg->phy.PHY_REG27 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG27);
	p_ddr_reg->phy.PHY_REG28 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG28);
	p_ddr_reg->phy.PHY_REG30 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG30);
	p_ddr_reg->phy.PHY_REG31 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG31);
	p_ddr_reg->phy.PHY_REG36 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG36);
	p_ddr_reg->phy.PHY_REG37 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG37);
	p_ddr_reg->phy.PHY_REG38 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG38);
	p_ddr_reg->phy.PHY_REG40 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG40);
	p_ddr_reg->phy.PHY_REG41 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG41);
	p_ddr_reg->phy.PHY_REG46 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG46);
	p_ddr_reg->phy.PHY_REG47 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG47);
	p_ddr_reg->phy.PHY_REG48 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG48);
	p_ddr_reg->phy.PHY_REG50 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG50);
	p_ddr_reg->phy.PHY_REG51 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG51);
	p_ddr_reg->phy.PHY_REG56 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG56);
	p_ddr_reg->phy.PHY_REG57 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG57);
	p_ddr_reg->phy.PHY_REG58 = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG58);
	p_ddr_reg->phy.PHY_REGDLL = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGDLL);
	p_ddr_reg->phy.PHY_REGEC = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEC);
	p_ddr_reg->phy.PHY_REGED = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGED);
	p_ddr_reg->phy.PHY_REGEE = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE);
	p_ddr_reg->phy.PHY_REGEF = 0;

	if (mmio_read_32(DDR_PHY_BASE + DDR_PHY_REG2) & 0x2) {
		p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REG2C);
		p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REG3C);
		p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REG4C);
		p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REG5C);
	} else {
		p_ddr_reg->phy.PHY_REGFB = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REGFB);
		p_ddr_reg->phy.PHY_REGFC = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REGFC);
		p_ddr_reg->phy.PHY_REGFD = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REGFD);
		p_ddr_reg->phy.PHY_REGFE = mmio_read_32(DDR_PHY_BASE +
							DDR_PHY_REGFE);
	}

	/* NOC */
	p_ddr_reg->noc.ddrconf = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRCONF);
	p_ddr_reg->noc.ddrtiming = mmio_read_32(SERVICE_BUS_BASE +
						MSCH_DDRTIMING);
	p_ddr_reg->noc.ddrmode = mmio_read_32(SERVICE_BUS_BASE + MSCH_DDRMODE);
	p_ddr_reg->noc.readlatency = mmio_read_32(SERVICE_BUS_BASE +
						  MSCH_READLATENCY);
	p_ddr_reg->noc.activate = mmio_read_32(SERVICE_BUS_BASE +
					       MSCH_ACTIVATE);
	p_ddr_reg->noc.devtodev = mmio_read_32(SERVICE_BUS_BASE +
					       MSCH_DEVTODEV);

	p_ddr_reg->pllselect = mmio_read_32(DDR_PHY_BASE + DDR_PHY_REGEE) * 0x1;
	p_ddr_reg->phypllockaddr = GRF_BASE + GRF_SOC_STATUS0;
	p_ddr_reg->phyplllockmask = GRF_DDRPHY_LOCK;
	p_ddr_reg->phyplllockval = 0;

	/* PLLPD */
	p_ddr_reg->pllpdstat = pllpdstat;
	/* DPLL */
	p_ddr_reg->dpllmodeaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3);
	/* slow mode and power on */
	p_ddr_reg->dpllslowmode = DPLL_WORK_SLOW_MODE | DPLL_POWER_DOWN;
	p_ddr_reg->dpllnormalmode = DPLL_WORK_NORMAL_MODE;
	p_ddr_reg->dpllresetaddr = CRU_BASE + PLL_CONS(DPLL_ID, 3);
	p_ddr_reg->dpllreset = DPLL_RESET_CONTROL_NORMAL;
	p_ddr_reg->dplldereset = DPLL_RESET_CONTROL_RESET;
	p_ddr_reg->dpllconaddr = CRU_BASE + PLL_CONS(DPLL_ID, 0);

	if (p_ddr_reg->pllselect == 0) {
		p_ddr_reg->dpllcon[0] = (mmio_read_32(CRU_BASE +
						      PLL_CONS(DPLL_ID, 0)) &
					 0xffff) | (0xFFFF << 16);
		p_ddr_reg->dpllcon[1] = (mmio_read_32(CRU_BASE +
						      PLL_CONS(DPLL_ID, 1)) &
					 0xffff);
		p_ddr_reg->dpllcon[2] = (mmio_read_32(CRU_BASE +
						      PLL_CONS(DPLL_ID, 2)) &
					 0xffff);
		p_ddr_reg->dpllcon[3] = (mmio_read_32(CRU_BASE +
						      PLL_CONS(DPLL_ID, 3)) &
					 0xffff) | (0xFFFF << 16);
	} else {
		ddr_get_dpll_cfg(&p_ddr_reg->dpllcon[0]);
	}

	p_ddr_reg->pllselect = 0;
	p_ddr_reg->dplllockaddr = CRU_BASE + PLL_CONS(DPLL_ID, 1);
	p_ddr_reg->dplllockmask = DPLL_STATUS_LOCK;
	p_ddr_reg->dplllockval = DPLL_STATUS_LOCK;

	/* SET_DDR_PLL_SRC */
	p_ddr_reg->ddrpllsrcdivaddr = CRU_BASE + CRU_CLKSELS_CON(13);
	p_ddr_reg->ddrpllsrcdiv = (mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(13))
				   & DDR_PLL_SRC_MASK) |
				  (DDR_PLL_SRC_MASK << 16);

	p_ddr_reg->retendisaddr = PMU_BASE + PMU_PWRMD_COM;
	p_ddr_reg->retendisval = PD_PERI_PWRDN_ENABLE;
	p_ddr_reg->grfregaddr = GRF_BASE + GRF_DDRC0_CON0;
	p_ddr_reg->grfddrcreg = (mmio_read_32(GRF_BASE + GRF_DDRC0_CON0) &
				 DDR_PLL_SRC_MASK) |
				(DDR_PLL_SRC_MASK << 16);

	/* pctl phy soft reset */
	p_ddr_reg->crupctlphysoftrstaddr = CRU_BASE + CRU_SOFTRSTS_CON(10);
	p_ddr_reg->cruresetpctlphy = DDRCTRL0_PSRSTN_REQ(1) |
				     DDRCTRL0_SRSTN_REQ(1) |
				     DDRPHY0_PSRSTN_REQ(1) |
				     DDRPHY0_SRSTN_REQ(1);
	p_ddr_reg->cruderesetphy = DDRCTRL0_PSRSTN_REQ(1) |
				   DDRCTRL0_SRSTN_REQ(1) |
				   DDRPHY0_PSRSTN_REQ(0) |
				   DDRPHY0_SRSTN_REQ(0);
	p_ddr_reg->cruderesetpctlphy = DDRCTRL0_PSRSTN_REQ(0) |
				       DDRCTRL0_SRSTN_REQ(0) |
				       DDRPHY0_PSRSTN_REQ(0) |
				       DDRPHY0_SRSTN_REQ(0);
	p_ddr_reg->physoftrstaddr = DDR_PHY_BASE + DDR_PHY_REG0;

	p_ddr_reg->endtag = 0xFFFFFFFF;
}
/*
 * "rk3368_ddr_reg_resume_V1.05.bin" is an executable binary generated with
 * ARM DS-5 that resumes the ddr controller. When the soc wakes up from
 * system suspend, the ddr has to be re-initialised by code running from
 * sram, but there is no way to place that resume code in PMUSRAM when
 * linking the .o files of bl31. The "rk3368_ddr_reg_resume_V1.05.bin" blob
 * is therefore position-independent, so it can be copied anywhere and run
 * in place.
 */
static __aligned(4) unsigned int ddr_reg_resume[] = {
#include "rk3368_ddr_reg_resume_V1.05.bin"
};
uint32_t ddr_get_resume_code_size(void)
{
	return sizeof(ddr_reg_resume);
}

uint32_t ddr_get_resume_data_size(void)
{
	return sizeof(struct BACKUP_REG_TAG);
}

uint32_t *ddr_get_resume_code_base(void)
{
	return (unsigned int *)ddr_reg_resume;
}
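Taken together with the warm-boot assembly earlier in this commit, these accessors describe how the blob is consumed on resume: plat_rockchip_pmusram_prepare() (pmu.c, later in this series) copies the code into PMUSRAM and records its address and the address of the BACKUP_REG_TAG save area in psram_sleep_cfg, and the warm-boot path then jumps into it with the save area as its argument. Expressed as C purely for illustration; the real transfer is the blr in the PMUSRAM assembly, and the blob's exact signature is an assumption here:

	/* Illustrative only: what the resume-time jump amounts to. */
	typedef void (*ddr_resume_fn)(void *saved_regs);

	ddr_resume_fn resume = (ddr_resume_fn)psram_sleep_cfg->ddr_func;
	resume((void *)psram_sleep_cfg->ddr_data);	/* x0 = BACKUP_REG_TAG block */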
plat/rockchip/rk3368/drivers/ddr/ddr_rk3368.h
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DDR_RK3368_H__
#define __DDR_RK3368_H__
#define DDR_PCTL_SCFG 0x0
#define DDR_PCTL_SCTL 0x4
#define DDR_PCTL_STAT 0x8
#define DDR_PCTL_INTRSTAT 0xc
#define DDR_PCTL_MCMD 0x40
#define DDR_PCTL_POWCTL 0x44
#define DDR_PCTL_POWSTAT 0x48
#define DDR_PCTL_CMDTSTAT 0x4c
#define DDR_PCTL_CMDTSTATEN 0x50
#define DDR_PCTL_MRRCFG0 0x60
#define DDR_PCTL_MRRSTAT0 0x64
#define DDR_PCTL_MRRSTAT1 0x68
#define DDR_PCTL_MCFG1 0x7c
#define DDR_PCTL_MCFG 0x80
#define DDR_PCTL_PPCFG 0x84
#define DDR_PCTL_MSTAT 0x88
#define DDR_PCTL_LPDDR2ZQCFG 0x8c
#define DDR_PCTL_DTUPDES 0x94
#define DDR_PCTL_DTUNA 0x98
#define DDR_PCTL_DTUNE 0x9c
#define DDR_PCTL_DTUPRD0 0xa0
#define DDR_PCTL_DTUPRD1 0xa4
#define DDR_PCTL_DTUPRD2 0xa8
#define DDR_PCTL_DTUPRD3 0xac
#define DDR_PCTL_DTUAWDT 0xb0
#define DDR_PCTL_TOGCNT1U 0xc0
#define DDR_PCTL_TINIT 0xc4
#define DDR_PCTL_TRSTH 0xc8
#define DDR_PCTL_TOGCNT100N 0xcc
#define DDR_PCTL_TREFI 0xd0
#define DDR_PCTL_TMRD 0xd4
#define DDR_PCTL_TRFC 0xd8
#define DDR_PCTL_TRP 0xdc
#define DDR_PCTL_TRTW 0xe0
#define DDR_PCTL_TAL 0xe4
#define DDR_PCTL_TCL 0xe8
#define DDR_PCTL_TCWL 0xec
#define DDR_PCTL_TRAS 0xf0
#define DDR_PCTL_TRC 0xf4
#define DDR_PCTL_TRCD 0xf8
#define DDR_PCTL_TRRD 0xfc
#define DDR_PCTL_TRTP 0x100
#define DDR_PCTL_TWR 0x104
#define DDR_PCTL_TWTR 0x108
#define DDR_PCTL_TEXSR 0x10c
#define DDR_PCTL_TXP 0x110
#define DDR_PCTL_TXPDLL 0x114
#define DDR_PCTL_TZQCS 0x118
#define DDR_PCTL_TZQCSI 0x11c
#define DDR_PCTL_TDQS 0x120
#define DDR_PCTL_TCKSRE 0x124
#define DDR_PCTL_TCKSRX 0x128
#define DDR_PCTL_TCKE 0x12c
#define DDR_PCTL_TMOD 0x130
#define DDR_PCTL_TRSTL 0x134
#define DDR_PCTL_TZQCL 0x138
#define DDR_PCTL_TMRR 0x13c
#define DDR_PCTL_TCKESR 0x140
#define DDR_PCTL_TDPD 0x144
#define DDR_PCTL_TREFI_MEM_DDR3 0x148
#define DDR_PCTL_ECCCFG 0x180
#define DDR_PCTL_ECCTST 0x184
#define DDR_PCTL_ECCCLR 0x188
#define DDR_PCTL_ECCLOG 0x18c
#define DDR_PCTL_DTUWACTL 0x200
#define DDR_PCTL_DTURACTL 0x204
#define DDR_PCTL_DTUCFG 0x208
#define DDR_PCTL_DTUECTL 0x20c
#define DDR_PCTL_DTUWD0 0x210
#define DDR_PCTL_DTUWD1 0x214
#define DDR_PCTL_DTUWD2 0x218
#define DDR_PCTL_DTUWD3 0x21c
#define DDR_PCTL_DTUWDM 0x220
#define DDR_PCTL_DTURD0 0x224
#define DDR_PCTL_DTURD1 0x228
#define DDR_PCTL_DTURD2 0x22c
#define DDR_PCTL_DTURD3 0x230
#define DDR_PCTL_DTULFSRWD 0x234
#define DDR_PCTL_DTULFSRRD 0x238
#define DDR_PCTL_DTUEAF 0x23c
#define DDR_PCTL_DFITCTRLDELAY 0x240
#define DDR_PCTL_DFIODTCFG 0x244
#define DDR_PCTL_DFIODTCFG1 0x248
#define DDR_PCTL_DFIODTRANKMAP 0x24c
#define DDR_PCTL_DFITPHYWRDATA 0x250
#define DDR_PCTL_DFITPHYWRLAT 0x254
#define DDR_PCTL_DFITPHYWRDATALAT 0x258
#define DDR_PCTL_DFITRDDATAEN 0x260
#define DDR_PCTL_DFITPHYRDLAT 0x264
#define DDR_PCTL_DFITPHYUPDTYPE0 0x270
#define DDR_PCTL_DFITPHYUPDTYPE1 0x274
#define DDR_PCTL_DFITPHYUPDTYPE2 0x278
#define DDR_PCTL_DFITPHYUPDTYPE3 0x27c
#define DDR_PCTL_DFITCTRLUPDMIN 0x280
#define DDR_PCTL_DFITCTRLUPDMAX 0x284
#define DDR_PCTL_DFITCTRLUPDDLY 0x288
#define DDR_PCTL_DFIUPDCFG 0x290
#define DDR_PCTL_DFITREFMSKI 0x294
#define DDR_PCTL_DFITCTRLUPDI 0x298
#define DDR_PCTL_DFITRCFG0 0x2ac
#define DDR_PCTL_DFITRSTAT0 0x2b0
#define DDR_PCTL_DFITRWRLVLEN 0x2b4
#define DDR_PCTL_DFITRRDLVLEN 0x2b8
#define DDR_PCTL_DFITRRDLVLGATEEN 0x2bc
#define DDR_PCTL_DFISTSTAT0 0x2c0
#define DDR_PCTL_DFISTCFG0 0x2c4
#define DDR_PCTL_DFISTCFG1 0x2c8
#define DDR_PCTL_DFITDRAMCLKEN 0x2d0
#define DDR_PCTL_DFITDRAMCLKDIS 0x2d4
#define DDR_PCTL_DFISTCFG2 0x2d8
#define DDR_PCTL_DFISTPARCLR 0x2dc
#define DDR_PCTL_DFISTPARLOG 0x2e0
#define DDR_PCTL_DFILPCFG0 0x2f0
#define DDR_PCTL_DFITRWRLVLRESP0 0x300
#define DDR_PCTL_DFITRWRLVLRESP1 0x304
#define DDR_PCTL_DFITRWRLVLRESP2 0x308
#define DDR_PCTL_DFITRRDLVLRESP0 0x30c
#define DDR_PCTL_DFITRRDLVLRESP1 0x310
#define DDR_PCTL_DFITRRDLVLRESP2 0x314
#define DDR_PCTL_DFITRWRLVLDELAY0 0x318
#define DDR_PCTL_DFITRWRLVLDELAY1 0x31c
#define DDR_PCTL_DFITRWRLVLDELAY2 0x320
#define DDR_PCTL_DFITRRDLVLDELAY0 0x324
#define DDR_PCTL_DFITRRDLVLDELAY1 0x328
#define DDR_PCTL_DFITRRDLVLDELAY2 0x32c
#define DDR_PCTL_DFITRRDLVLGATEDELAY0 0x330
#define DDR_PCTL_DFITRRDLVLGATEDELAY1 0x334
#define DDR_PCTL_DFITRRDLVLGATEDELAY2 0x338
#define DDR_PCTL_DFITRCMD 0x33c
#define DDR_PCTL_IPVR 0x3f8
#define DDR_PCTL_IPTR 0x3fc
/* DDR PHY REG */
#define DDR_PHY_REG0 0x0
#define DDR_PHY_REG1 0x4
#define DDR_PHY_REG2 0x8
#define DDR_PHY_REG3 0xc
#define DDR_PHY_REG4 0x10
#define DDR_PHY_REG5 0x14
#define DDR_PHY_REG6 0x18
#define DDR_PHY_REGB 0x2c
#define DDR_PHY_REGC 0x30
#define DDR_PHY_REG11 0x44
#define DDR_PHY_REG12 0x48
#define DDR_PHY_REG13 0x4c
#define DDR_PHY_REG14 0x50
#define DDR_PHY_REG16 0x58
#define DDR_PHY_REG20 0x80
#define DDR_PHY_REG21 0x84
#define DDR_PHY_REG26 0x98
#define DDR_PHY_REG27 0x9c
#define DDR_PHY_REG28 0xa0
#define DDR_PHY_REG2C 0xb0
#define DDR_PHY_REG30 0xc0
#define DDR_PHY_REG31 0xc4
#define DDR_PHY_REG36 0xd8
#define DDR_PHY_REG37 0xdc
#define DDR_PHY_REG38 0xe0
#define DDR_PHY_REG3C 0xf0
#define DDR_PHY_REG40 0x100
#define DDR_PHY_REG41 0x104
#define DDR_PHY_REG46 0x118
#define DDR_PHY_REG47 0x11c
#define DDR_PHY_REG48 0x120
#define DDR_PHY_REG4C 0x130
#define DDR_PHY_REG50 0x140
#define DDR_PHY_REG51 0x144
#define DDR_PHY_REG56 0x158
#define DDR_PHY_REG57 0x15c
#define DDR_PHY_REG58 0x160
#define DDR_PHY_REG5C 0x170
#define DDR_PHY_REGDLL 0x290
#define DDR_PHY_REGEC 0x3b0
#define DDR_PHY_REGED 0x3b4
#define DDR_PHY_REGEE 0x3b8
#define DDR_PHY_REGEF 0x3bc
#define DDR_PHY_REGF0 0x3c0
#define DDR_PHY_REGF1 0x3c4
#define DDR_PHY_REGF2 0x3c8
#define DDR_PHY_REGFA 0x3e8
#define DDR_PHY_REGFB 0x3ec
#define DDR_PHY_REGFC 0x3f0
#define DDR_PHY_REGFD 0x3f4
#define DDR_PHY_REGFE 0x3f8
#define DDR_PHY_REGFF 0x3fc
/* MSCH REG define */
#define MSCH_COREID 0x0
#define MSCH_DDRCONF 0x8
#define MSCH_DDRTIMING 0xc
#define MSCH_DDRMODE 0x10
#define MSCH_READLATENCY 0x14
#define MSCH_ACTIVATE 0x38
#define MSCH_DEVTODEV 0x3c
#define SET_NR(n) ((0x3f << (8 + 16)) | ((n - 1) << 8))
#define SET_NO(n) ((0xf << (0 + 16)) | ((n - 1) << 0))
#define SET_NF(n) ((n - 1) & 0x1fff)
#define SET_NB(n) ((n - 1) & 0xfff)
#define PLLMODE(n) ((0x3 << (8 + 16)) | (n << 8))
/* GRF REG define */
#define GRF_SOC_STATUS0 0x480
#define GRF_DDRPHY_LOCK (0x1 << 15)
#define GRF_DDRC0_CON0 0x600
/* CRU softreset ddr pctl, phy */
#define DDRMSCH0_SRSTN_REQ(n) (((0x1 << 10) << 16) | (n << 10))
#define DDRCTRL0_PSRSTN_REQ(n) (((0x1 << 3) << 16) | (n << 3))
#define DDRCTRL0_SRSTN_REQ(n) (((0x1 << 2) << 16) | (n << 2))
#define DDRPHY0_PSRSTN_REQ(n) (((0x1 << 1) << 16) | (n << 1))
#define DDRPHY0_SRSTN_REQ(n) (((0x1 << 0) << 16) | (n << 0))
/* CRU_DPLL_CON2 */
#define DPLL_STATUS_LOCK (1 << 31)
/* CRU_DPLL_CON3 */
#define DPLL_POWER_DOWN ((0x1 << (1 + 16)) | (0 << 1))
#define DPLL_WORK_NORMAL_MODE ((0x3 << (8 + 16)) | (0 << 8))
#define DPLL_WORK_SLOW_MODE ((0x3 << (8 + 16)) | (1 << 8))
#define DPLL_RESET_CONTROL_NORMAL ((0x1 << (5 + 16)) | (0x0 << 5))
#define DPLL_RESET_CONTROL_RESET ((0x1 << (5 + 16)) | (0x1 << 5))
/* PMU_PWRDN_CON */
#define PD_PERI_PWRDN_ENABLE (1 << 13)
#define DDR_PLL_SRC_MASK 0x13
/* DDR_PCTL_TREFI */
#define DDR_UPD_REF_ENABLE (0X1 << 31)
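A note on the macro style used above and throughout this port: Rockchip CRU/GRF control registers pair each writable field with a write-enable bit in the upper 16 bits, so a single store can change one field without a read-modify-write. For example, DDRPHY0_SRSTN_REQ(1) expands to ((0x1 << 0) << 16) | (1 << 0) = 0x00010001: the high half selects bit 0 for writing and the low half carries its new value, leaving all other bits untouched. A minimal illustration (the register choice mirrors crupctlphysoftrstaddr in ddr_reg_save(); the call itself is not taken verbatim from this patch):

	/* Assert only the ddr phy soft reset; other reset bits are unaffected. */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(10), DDRPHY0_SRSTN_REQ(1));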
uint32_t ddr_get_resume_code_size(void);
uint32_t ddr_get_resume_data_size(void);
uint32_t *ddr_get_resume_code_base(void);
void ddr_reg_save(uint32_t pllpdstat, uint64_t base_addr);
#endif
plat/rockchip/rk3368/drivers/ddr/rk3368_ddr_reg_resume_V1.05.bin
0 → 100644
0x14000088,
0xd10043ff,
0x5283ffe1,
0x52824902,
0x1b020400,
0x530d7c00,
0xb9000fe0,
0xb9400fe0,
0x340000a0,
0xb9400fe0,
0x51000401,
0xb9000fe1,
0x35ffffa0,
0x910043ff,
0xd65f03c0,
0x340000e2,
0xb9400023,
0xb9000003,
0x91001021,
0x91001000,
0x51000442,
0x35ffff62,
0xd65f03c0,
0xd10043ff,
0xb9400801,
0x12000821,
0xb9000fe1,
0xb9400fe1,
0x7100043f,
0x54000320,
0x52800021,
0x52800082,
0xb9400fe3,
0x34000143,
0x71000c7f,
0x54000100,
0x7100147f,
0x54000161,
0xb9000402,
0xb9400803,
0x12000863,
0x71000c7f,
0x54ffffa1,
0xb9000401,
0xb9400803,
0x12000863,
0x7100047f,
0x54ffffa1,
0xb9400803,
0x12000863,
0xb9000fe3,
0xb9400fe3,
0x7100047f,
0x54fffd61,
0x910043ff,
0xd65f03c0,
0xd10043ff,
0xb9400801,
0x12000821,
0xb9000fe1,
0xb9400fe1,
0x7100143f,
0x54000400,
0x52800021,
0x52800042,
0x52800063,
0xb9400fe4,
0x340000c4,
0x7100049f,
0x54000120,
0x71000c9f,
0x54000180,
0x14000010,
0xb9000401,
0xb9400804,
0x12000884,
0x7100049f,
0x54ffffa1,
0xb9000402,
0xb9400804,
0x12000884,
0x71000c9f,
0x54ffffa1,
0xb9000403,
0xb9400804,
0x12000884,
0x7100149f,
0x54ffffa1,
0xb9400804,
0x12000884,
0xb9000fe4,
0xb9400fe4,
0x7100149f,
0x54fffca1,
0x910043ff,
0xd65f03c0,
0xd10043ff,
0xb9400801,
0x12000821,
0xb9000fe1,
0xb9400fe1,
0x71000c3f,
0x54000400,
0x52800021,
0x52800042,
0x52800083,
0xb9400fe4,
0x34000164,
0x7100049f,
0x540001c0,
0x7100149f,
0x54000221,
0xb9000403,
0xb9400804,
0x12000884,
0x71000c9f,
0x54ffffa1,
0x1400000b,
0xb9000401,
0xb9400804,
0x12000884,
0x7100049f,
0x54ffffa1,
0xb9000402,
0xb9400804,
0x12000884,
0x71000c9f,
0x54ffffa1,
0xb9400804,
0x12000884,
0xb9000fe4,
0xb9400fe4,
0x71000c9f,
0x54fffca1,
0x910043ff,
0xd65f03c0,
0xd10103ff,
0xa9037bfd,
0x9100c3fd,
0xa9025ff6,
0xa90157f4,
0xf90007f3,
0xaa0003f3,
0xb9400674,
0xb9411276,
0xb941c660,
0xb941aa75,
0x7100041f,
0x54000261,
0xb9418e60,
0x321f0000,
0xb903b6c0,
0xb9418a60,
0xb903b2c0,
0xb9419260,
0xb903bac0,
0xb9418e60,
0x121e7800,
0xb903b6c0,
0xb941ca60,
0xb941ce61,
0xb941d262,
0xb9400003,
0xa030023,
0x6b22407f,
0x54ffffa0,
0x1400003b,
0xb941d660,
0x7100041f,
0x54000701,
0xb941da60,
0x3100041f,
0x54000080,
0xb941de61,
0x53007c00,
0xb9000001,
0xb941e660,
0x3100041f,
0x54000080,
0xb941ea61,
0x53007c00,
0xb9000001,
0xb941f260,
0x3100041f,
0x54000120,
0xaa1f03e1,
0x53007c00,
0x9107d262,
0xb8616843,
0xb8216803,
0x91001021,
0xf100203f,
0x54ffff81,
0x52800020,
0x97ffff3f,
0xb941e660,
0x3100041f,
0x54000080,
0xb941ee61,
0x53007c00,
0xb9000001,
0x52800020,
0x97ffff37,
0xb9420660,
0x3100041f,
0x54000100,
0xb9420a61,
0xb9420e62,
0x53007c00,
0xb9400003,
0xa030023,
0x6b22407f,
0x54ffffa1,
0xb9421260,
0x3100041f,
0x54000080,
0xb9421661,
0x53007c00,
0xb9000001,
0xb941da60,
0x3100041f,
0x54000080,
0xb941e261,
0x53007c00,
0xb9000001,
0xb9419660,
0xb903bec0,
0xb9422a60,
0x34000400,
0xb9422e61,
0x53007c17,
0xb90002e1,
0x52800140,
0x97ffff18,
0xb9423260,
0xb90002e0,
0x52800140,
0x97ffff14,
0xb9423660,
0xb90002e0,
0x52800140,
0x97ffff10,
0xb9423a60,
0x34000220,
0x53007c17,
0xb94002e0,
0x121c7400,
0xb90002e0,
0x52800020,
0x97ffff08,
0xb94002e0,
0x321e0000,
0xb90002e0,
0x528000a0,
0x97ffff03,
0xb94002e0,
0x321d0000,
0xb90002e0,
0x52800020,
0x97fffefe,
0xb9412a60,
0xb9004ec0,
0xb9412e60,
0xb90052c0,
0xb9413e60,
0xb9009ac0,
0xb9414260,
0xb9009ec0,
0xb9415260,
0xb900dac0,
0xb9415660,
0xb900dec0,
0xb9416660,
0xb9011ac0,
0xb9416a60,
0xb9011ec0,
0xb9417a60,
0xb9015ac0,
0xb9417e60,
0xb9015ec0,
0xb9418660,
0xb90292c0,
0xb9414660,
0xb900a2c0,
0xb9415a60,
0xb900e2c0,
0xb9416e60,
0xb90122c0,
0xb9418260,
0xb90162c0,
0xb9411660,
0xb90002c0,
0xb9411a60,
0xb90006c0,
0xb9411e60,
0xb9002ec0,
0xb9412260,
0xb90032c0,
0xb9412660,
0xb90046c0,
0xb9413260,
0xb9005ac0,
0xb9413660,
0xb90082c0,
0xb9413a60,
0xb90086c0,
0xb9414a60,
0xb900c2c0,
0xb9414e60,
0xb900c6c0,
0xb9415e60,
0xb90102c0,
0xb9416260,
0xb90106c0,
0xb9417260,
0xb90142c0,
0xb9417660,
0xb90146c0,
0x52800040,
0xb9000ac0,
0xb9411261,
0xb9419a60,
0xb900b020,
0xb9419a60,
0xb900b420,
0xb9419e60,
0xb900f020,
0xb9419e60,
0xb900f420,
0xb941a260,
0xb9013020,
0xb941a260,
0xb9013420,
0xb941a660,
0xb9017020,
0xb941a662,
0xaa1f03e0,
0xb9017422,
0x91008261,
0xb8606822,
0x8b000283,
0xb900c062,
0x91001000,
0xf102301f,
0x54ffff61,
0xb9400a60,
0xb9000280,
0xb9400e60,
0xb9005280,
0xb9401260,
0xb9007e80,
0xb9401660,
0xb9008280,
0xb9401a60,
0xb9008680,
0xb940ae60,
0xb9024280,
0xb940b260,
0xb9024680,
0xb940b660,
0xb9024a80,
0xb940ba60,
0xb9024e80,
0xb940be60,
0xb9025280,
0xb940c260,
0xb9025680,
0xb940c660,
0xb9025a80,
0xb940ca60,
0xb9026280,
0xb940ce60,
0xb9026680,
0xb940d260,
0xb9027280,
0xb940d660,
0xb9027680,
0xb940da60,
0xb9027a80,
0xb940de60,
0xb9027e80,
0xb940e260,
0xb9028280,
0xb940e660,
0xb9028680,
0xb940ea60,
0xb9028a80,
0xb940ee60,
0xb9029280,
0xb940f260,
0xb9029680,
0xb940f660,
0xb9029a80,
0xb940fa60,
0xb902c680,
0xb940fe60,
0xb902ca80,
0xb9410260,
0xb902d280,
0xb9410660,
0xb902d680,
0xb9410a60,
0xb902da80,
0xb9410e60,
0xb902f280,
0xb9422260,
0x3100041f,
0x540000c0,
0xb9422661,
0x53007c00,
0xb9000001,
0x52800020,
0x97fffe65,
0x52800020,
0xb9004680,
0xb9404a80,
0x3607ffe0,
0xb941ae60,
0xb9000aa0,
0xb941b260,
0xb9000ea0,
0xb941b660,
0xb90012a0,
0xb941ba60,
0xb90016a0,
0xb941be60,
0xb9003aa0,
0xb941c260,
0xb9003ea0,
0xb9422260,
0x3100041f,
0x54000080,
0x53007c00,
0x320083e1,
0xb9000001,
0xaa1403e0,
0x97fffe84,
0xb9421a60,
0x3100041f,
0x54000100,
0x53007c00,
0xb9421e61,
0xb9400002,
0x2a010041,
0xb9000001,
0x52800020,
0x97fffe43,
0xaa1403e0,
0x97fffea0,
0xb9422260,
0x3100041f,
0x54000080,
0x53007c00,
0x52a00021,
0xb9000001,
0xf94007f3,
0xa94157f4,
0xa9425ff6,
0xa9437bfd,
0x910103ff,
0xd65f03c0,
plat/rockchip/rk3368/drivers/pmu/pmu.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <ddr_rk3368.h>
#include <pmu_com.h>
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;
void rk3368_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
	dsb();
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(clst_b_l2_flsh_done))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
	uint32_t mask = BIT(req);
	uint32_t idle_mask = 0;
	uint32_t idle_target = 0;
	uint32_t val;
	uint32_t wait_cnt = 0;

	switch (req) {
	case bus_ide_req_clst_l:
		idle_mask = BIT(pmu_idle_ack_cluster_l);
		idle_target = (idle << pmu_idle_ack_cluster_l);
		break;
	case bus_ide_req_clst_b:
		idle_mask = BIT(pmu_idle_ack_cluster_b);
		idle_target = (idle << pmu_idle_ack_cluster_b);
		break;
	case bus_ide_req_cxcs:
		idle_mask = BIT(pmu_idle_ack_cxcs);
		idle_target = ((!idle) << pmu_idle_ack_cxcs);
		break;
	case bus_ide_req_cci400:
		idle_mask = BIT(pmu_idle_ack_cci400);
		idle_target = ((!idle) << pmu_idle_ack_cci400);
		break;
	case bus_ide_req_gpu:
		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
		idle_target = (idle << pmu_idle_ack_gpu) |
			      (idle << pmu_idle_gpu);
		break;
	case bus_ide_req_core:
		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
		idle_target = (idle << pmu_idle_ack_core) |
			      (idle << pmu_idle_core);
		break;
	case bus_ide_req_bus:
		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
		idle_target = (idle << pmu_idle_ack_bus) |
			      (idle << pmu_idle_bus);
		break;
	case bus_ide_req_dma:
		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
		idle_target = (idle << pmu_idle_ack_dma) |
			      (idle << pmu_idle_dma);
		break;
	case bus_ide_req_peri:
		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
		idle_target = (idle << pmu_idle_ack_peri) |
			      (idle << pmu_idle_peri);
		break;
	case bus_ide_req_video:
		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
		idle_target = (idle << pmu_idle_ack_video) |
			      (idle << pmu_idle_video);
		break;
	case bus_ide_req_vio:
		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
		idle_target = (pmu_idle_ack_vio) |
			      (idle << pmu_idle_vio);
		break;
	case bus_ide_req_alive:
		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
		idle_target = (idle << pmu_idle_ack_alive) |
			      (idle << pmu_idle_alive);
		break;
	case bus_ide_req_pmu:
		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
		idle_target = (idle << pmu_idle_ack_pmu) |
			      (idle << pmu_idle_pmu);
		break;
	case bus_ide_req_msch:
		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
		idle_target = (idle << pmu_idle_ack_msch) |
			      (idle << pmu_idle_msch);
		break;
	case bus_ide_req_cci:
		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
		idle_target = (idle << pmu_idle_ack_cci) |
			      (idle << pmu_idle_cci);
		break;
	default:
		ERROR("%s: Unsupported the idle request\n", __func__);
		break;
	}

	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
	if (idle)
		val |= mask;
	else
		val &= ~mask;

	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

	while ((mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST) &
		idle_mask) != idle_target) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:st=%x(%x)\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
			     idle_mask);
	}

	return 0;
}
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}
static void pmu_sleep_mode_config(void)
{
	uint32_t pwrmd_core, pwrmd_com;

	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
		     BIT(pmu_mdcr_scu_l_pd) |
		     BIT(pmu_mdcr_l2_flush) |
		     BIT(pmu_mdcr_l2_idle) |
		     BIT(pmu_mdcr_clr_clst_l) |
		     BIT(pmu_mdcr_clr_core) |
		     BIT(pmu_mdcr_clr_cci) |
		     BIT(pmu_mdcr_core_pd);

	pwrmd_com = BIT(pmu_mode_en) |
		    BIT(pmu_mode_sref_enter) |
		    BIT(pmu_mode_pwr_off);

	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));

	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
	dsb();
}
static void ddr_suspend_save(void)
{
	ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}

static void pmu_set_sleep_mode(void)
{
	ddr_suspend_save();
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}
void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size = 2;
	uint32_t code_size, data_size;

	/* pmu sram code and data prepare */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
		    (uint32_t *)sram_src;
	u32_align_cpy(sram_dst, sram_src, sram_size);

	/* ddr code */
	sram_dst += sram_size;
	sram_src = ddr_get_resume_code_base();
	code_size = ddr_get_resume_code_size();
	u32_align_cpy(sram_dst, sram_src, code_size / 4);
	psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;

	/* ddr data */
	sram_dst += (code_size / 4);
	data_size = ddr_get_resume_data_size();
	psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;

	assert((uint64_t)(sram_dst + data_size / 4) < PSRAM_SP_BOTTOM);
	psram_sleep_cfg->sp = PSRAM_SP_TOP;
}
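For orientation, the copies and bookkeeping above leave PMUSRAM laid out roughly as sketched below; the only hard constraint is the assert against PSRAM_SP_BOTTOM, everything else is simply "whatever was copied". This is a sketch, not a literal map from the sources:

	PMUSRAM_BASE                      warm-boot entry code (pmu_cpuson_entrypoint_start..end)
	+ sram_size words                 ddr resume blob          -> psram_sleep_cfg->ddr_func
	+ code_size/4 words               BACKUP_REG_TAG save area -> psram_sleep_cfg->ddr_data
	...                               must end below PSRAM_SP_BOTTOM
	PSRAM_SP_BOTTOM .. PSRAM_SP_TOP   stack used by the resume blob -> psram_sleep_cfg->sp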
static int cpus_id_power_domain(uint32_t cluster,
				uint32_t cpu,
				uint32_t pd_state,
				uint32_t wfie_msk)
{
	uint32_t pd;
	uint64_t mpidr;

	if (cluster)
		pd = PD_CPUB0 + cpu;
	else
		pd = PD_CPUL0 + cpu;

	if (pmu_power_domain_st(pd) == pd_state)
		return 0;

	if (pd_state == pmu_pd_off) {
		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
		if (check_cpu_wfie(mpidr, wfie_msk))
			return -EINVAL;
	}

	return pmu_power_domain_ctr(pd, pd_state);
}
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off the non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off before powering it up */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}
static int cores_pwr_domain_on_finish(void)
{
	uint32_t cpuon_id;

	cpuon_id = plat_my_core_pos();
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = 0x00;

	return 0;
}

static int sys_pwr_domain_resume(void)
{
	psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;

	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}

static int sys_pwr_domain_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	psram_sleep_cfg->sys_mode = PMU_SYS_SLP_MODE;
	psram_sleep_cfg->ddr_flag = 0;

	return 0;
}
static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_sys_global_soft_reset,
};
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	plat_setup_rockchip_pm_ops(&pm_ops);

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}
plat/rockchip/rk3368/drivers/pmu/pmu.h
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PMU_H__
#define __PMU_H__
/* Allocate sp region in pmusram */
#define PSRAM_SP_SIZE 0x80
#define PSRAM_SP_BOTTOM (PSRAM_SP_TOP - PSRAM_SP_SIZE)
/*****************************************************************************
* pmu con,reg
*****************************************************************************/
#define PMU_WKUP_CFG0 0x0
#define PMU_WKUP_CFG1 0x4
#define PMU_WKUP_CFG2 0x8
#define PMU_TIMEOUT_CNT 0x7c
#define PMU_PWRDN_CON 0xc
#define PMU_PWRDN_ST 0x10
#define PMU_CORE_PWR_ST 0x38
#define PMU_PWRMD_CORE 0x14
#define PMU_PWRMD_COM 0x18
#define PMU_SFT_CON 0x1c
#define PMU_BUS_IDE_REQ 0x3c
#define PMU_BUS_IDE_ST 0x40
#define PMU_OSC_CNT 0x48
#define PMU_PLLLOCK_CNT 0x4c
#define PMU_PLLRST_CNT 0x50
#define PMU_STABLE_CNT 0x54
#define PMU_DDRIO_PWR_CNT 0x58
#define PMU_WKUPRST_CNT 0x5c
enum pmu_powermode_core {
	pmu_mdcr_global_int_dis = 0,
	pmu_mdcr_core_src_gt,
	pmu_mdcr_clr_cci,
	pmu_mdcr_cpu0_pd,
	pmu_mdcr_clr_clst_l = 4,
	pmu_mdcr_clr_core,
	pmu_mdcr_scu_l_pd,
	pmu_mdcr_core_pd,
	pmu_mdcr_l2_idle = 8,
	pmu_mdcr_l2_flush
};
/*
* the shift of bits for cores status
*/
enum pmu_core_pwrst_shift {
	clstl_cpu_wfe = 2,
	clstl_cpu_wfi = 6,
	clstb_cpu_wfe = 12,
	clstb_cpu_wfi = 16
};

enum pmu_pdid {
	PD_CPUL0 = 0,
	PD_CPUL1,
	PD_CPUL2,
	PD_CPUL3,
	PD_SCUL,
	PD_CPUB0 = 5,
	PD_CPUB1,
	PD_CPUB2,
	PD_CPUB3,
	PD_SCUB = 9,
	PD_PERI = 13,
	PD_VIDEO,
	PD_VIO,
	PD_GPU0,
	PD_GPU1,
	PD_END
};

enum pmu_bus_ide {
	bus_ide_req_clst_l = 0,
	bus_ide_req_clst_b,
	bus_ide_req_gpu,
	bus_ide_req_core,
	bus_ide_req_bus = 4,
	bus_ide_req_dma,
	bus_ide_req_peri,
	bus_ide_req_video,
	bus_ide_req_vio = 8,
	bus_ide_req_res0,
	bus_ide_req_cxcs,
	bus_ide_req_alive,
	bus_ide_req_pmu = 12,
	bus_ide_req_msch,
	bus_ide_req_cci,
	bus_ide_req_cci400 = 15,
	bus_ide_req_end
};

enum pmu_powermode_common {
	pmu_mode_en = 0,
	pmu_mode_res0,
	pmu_mode_bus_pd,
	pmu_mode_wkup_rst,
	pmu_mode_pll_pd = 4,
	pmu_mode_pwr_off,
	pmu_mode_pmu_use_if,
	pmu_mode_pmu_alive_use_if,
	pmu_mode_osc_dis = 8,
	pmu_mode_input_clamp,
	pmu_mode_sref_enter,
	pmu_mode_ddrc_gt,
	pmu_mode_ddrio_ret = 12,
	pmu_mode_ddrio_ret_deq,
	pmu_mode_clr_pmu,
	pmu_mode_clr_alive,
	pmu_mode_clr_bus = 16,
	pmu_mode_clr_dma,
	pmu_mode_clr_msch,
	pmu_mode_clr_peri,
	pmu_mode_clr_video = 20,
	pmu_mode_clr_vio,
	pmu_mode_clr_gpu,
	pmu_mode_clr_mcu,
	pmu_mode_clr_cxcs = 24,
	pmu_mode_clr_cci400,
	pmu_mode_res1,
	pmu_mode_res2,
	pmu_mode_res3 = 28,
	pmu_mode_mclst
};

enum pmu_core_power_st {
	clst_l_cpu_wfe = 2,
	clst_l_cpu_wfi = 6,
	clst_b_l2_flsh_done = 10,
	clst_b_l2_wfi = 11,
	clst_b_cpu_wfe = 12,
	clst_b_cpu_wfi = 16,
	mcu_sleeping = 20,
};

enum pmu_sft_con {
	pmu_sft_acinactm_clst_b = 5,
	pmu_sft_l2flsh_clst_b,
	pmu_sft_glbl_int_dis_b = 9,
	pmu_sft_ddrio_ret_cfg = 11,
};

enum pmu_wkup_cfg2 {
	pmu_cluster_l_wkup_en = 0,
	pmu_cluster_b_wkup_en,
	pmu_gpio_wkup_en,
	pmu_sdio_wkup_en,
	pmu_sdmmc_wkup_en,
	pmu_sim_wkup_en,
	pmu_timer_wkup_en,
	pmu_usbdev_wkup_en,
	pmu_sft_wkup_en,
	pmu_wdt_mcu_wkup_en,
	pmu_timeout_wkup_en,
};

enum pmu_bus_idle_st {
	pmu_idle_ack_cluster_l = 0,
	pmu_idle_ack_cluster_b,
	pmu_idle_ack_gpu,
	pmu_idle_ack_core,
	pmu_idle_ack_bus,
	pmu_idle_ack_dma,
	pmu_idle_ack_peri,
	pmu_idle_ack_video,
	pmu_idle_ack_vio,
	pmu_idle_ack_cci = 10,
	pmu_idle_ack_msch,
	pmu_idle_ack_alive,
	pmu_idle_ack_pmu,
	pmu_idle_ack_cxcs,
	pmu_idle_ack_cci400,
	pmu_inactive_cluster_l,
	pmu_inactive_cluster_b,
	pmu_idle_gpu,
	pmu_idle_core,
	pmu_idle_bus,
	pmu_idle_dma,
	pmu_idle_peri,
	pmu_idle_video,
	pmu_idle_vio,
	pmu_idle_cci = 26,
	pmu_idle_msch,
	pmu_idle_alive,
	pmu_idle_pmu,
	pmu_active_cxcs,
	pmu_active_cci,
};
#define PM_PWRDM_CPUSB_MSK (0xf << 5)
#define CKECK_WFE_MSK 0x1
#define CKECK_WFI_MSK 0x10
#define CKECK_WFEI_MSK 0x11
#define PD_CTR_LOOP 500
#define CHK_CPU_LOOP 500
#define MAX_WAIT_CONUT 1000
#endif
/* __PMU_H__ */
plat/rockchip/rk3368/drivers/soc/soc.c
0 → 100644
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <debug.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <soc.h>
static uint32_t plls_con[END_PLL_ID][4];
/* Table of regions to map using the MMU. */
const mmap_region_t plat_rk_mmap[] = {
	MAP_REGION_FLAT(CCI400_BASE, CCI400_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
			MT_MEMORY | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(UART_DBG_BASE, UART_DBG_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(CRU_BASE, CRU_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(DDR_PCTL_BASE, DDR_PCTL_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(GRF_BASE, GRF_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(SERVICE_BUS_BASE, SERVICE_BUS_SISE,
			MT_DEVICE | MT_RW | MT_SECURE),
	{ 0 }
};
/* The RockChip power domain tree descriptor */
const unsigned char rockchip_power_domain_tree_desc[] = {
	/* No of root nodes */
	PLATFORM_SYSTEM_COUNT,
	/* No of children for the root node */
	PLATFORM_CLUSTER_COUNT,
	/* No of children for the first cluster node */
	PLATFORM_CLUSTER0_CORE_COUNT,
	/* No of children for the second cluster node */
	PLATFORM_CLUSTER1_CORE_COUNT
};
void secure_timer_init(void)
{
	mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT0, 0xffffffff);
	mmio_write_32(STIMER1_BASE + TIMER_LOADE_COUNT1, 0xffffffff);

	/* auto reload & enable the timer */
	mmio_write_32(STIMER1_BASE + TIMER_CONTROL_REG, TIMER_EN);
}
void sgrf_init(void)
{
	/* set all configurable IPs to non-secure */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(5), SGRF_SOC_CON_NS);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(6), SGRF_SOC_CON7_BITS);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(7), SGRF_SOC_CON_NS);

	/* switch the secure DMA controllers to non-secure */
	mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(0), SGRF_BUSDMAC_CON0_NS);
	mmio_write_32(SGRF_BASE + SGRF_BUSDMAC_CON(1), SGRF_BUSDMAC_CON1_NS);
	dsb();

	/* assert the dma1 reset */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1),
		      RST_DMA1_MSK | (RST_DMA1_MSK << 16));
	/* assert the dma2 reset */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4),
		      RST_DMA2_MSK | (RST_DMA2_MSK << 16));
	dsb();

	/* release the dma1 reset */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1), (RST_DMA1_MSK << 16));
	/* release the dma2 reset */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(4), (RST_DMA2_MSK << 16));
}
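The CRU_SOFTRSTS_CON writes above use the Rockchip write-mask register layout: the upper 16 bits select which of the lower 16 bits the write modifies, so RST_DMA1_MSK | (RST_DMA1_MSK << 16) asserts the DMA1 resets and RST_DMA1_MSK << 16 releases them without a read-modify-write. A minimal sketch of the idiom, with a hypothetical helper name:

/* Hypothetical helper: update only the bits selected by 'mask'. */
static inline uint32_t rk_hiword_val(uint32_t mask, uint32_t val)
{
	return (mask << 16) | (val & mask);
}

/*
 * Equivalent to the assert/release sequence above, e.g. for DMA1:
 *   mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1),
 *                 rk_hiword_val(RST_DMA1_MSK, RST_DMA1_MSK));
 *   mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(1),
 *                 rk_hiword_val(RST_DMA1_MSK, 0));
 */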
void plat_rockchip_soc_init(void)
{
	secure_timer_init();
	sgrf_init();
}
void regs_updata_bits(uintptr_t addr, uint32_t val,
		      uint32_t mask, uint32_t shift)
{
	uint32_t tmp, orig;

	orig = mmio_read_32(addr);

	tmp = orig & ~(mask << shift);
	tmp |= (val & mask) << shift;

	if (tmp != orig)
		mmio_write_32(addr, tmp);
	dsb();
}
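regs_updata_bits() is a plain read-modify-write helper: it only issues the MMIO write when the masked field actually changes, then orders the access with dsb(). A hedged usage example (the register named here is a placeholder, and the helper is not suitable for the write-mask style registers described earlier):

/* Example: set the field at bits [6:4] of some device register to 0x5. */
/* 'some_ctrl_reg' is a placeholder address, not an rk3368 register.    */
regs_updata_bits(some_ctrl_reg, 0x5, 0x7, 4);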
static void plls_suspend(uint32_t pll_id)
{
	plls_con[pll_id][0] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 0));
	plls_con[pll_id][1] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 1));
	plls_con[pll_id][2] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 2));
	plls_con[pll_id][3] = mmio_read_32(CRU_BASE + PLL_CONS((pll_id), 3));

	mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_SLOW_BITS);
	mmio_write_32(CRU_BASE + PLL_CONS((pll_id), 3), PLL_BYPASS);
}
static void pm_plls_suspend(void)
{
	plls_suspend(NPLL_ID);
	plls_suspend(CPLL_ID);
	plls_suspend(GPLL_ID);
	plls_suspend(ABPLL_ID);
	plls_suspend(ALPLL_ID);
}
static inline void plls_resume(void)
{
	mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3),
		      plls_con[ABPLL_ID][3] | PLL_BYPASS_W_MSK);
	mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3),
		      plls_con[ALPLL_ID][3] | PLL_BYPASS_W_MSK);
	mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3),
		      plls_con[GPLL_ID][3] | PLL_BYPASS_W_MSK);
	mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3),
		      plls_con[CPLL_ID][3] | PLL_BYPASS_W_MSK);
	mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3),
		      plls_con[NPLL_ID][3] | PLL_BYPASS_W_MSK);
}
void soc_sleep_config(void)
{
	int i = 0;

	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000);

	pm_plls_suspend();

	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i), 0xffff0000);
}
void pm_plls_resume(void)
{
	plls_resume();

	mmio_write_32(CRU_BASE + PLL_CONS(ABPLL_ID, 3),
		      plls_con[ABPLL_ID][3] | PLLS_MODE_WMASK);
	mmio_write_32(CRU_BASE + PLL_CONS(ALPLL_ID, 3),
		      plls_con[ALPLL_ID][3] | PLLS_MODE_WMASK);
	mmio_write_32(CRU_BASE + PLL_CONS(GPLL_ID, 3),
		      plls_con[GPLL_ID][3] | PLLS_MODE_WMASK);
	mmio_write_32(CRU_BASE + PLL_CONS(CPLL_ID, 3),
		      plls_con[CPLL_ID][3] | PLLS_MODE_WMASK);
	mmio_write_32(CRU_BASE + PLL_CONS(NPLL_ID, 3),
		      plls_con[NPLL_ID][3] | PLLS_MODE_WMASK);
}
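soc_sleep_config() and pm_plls_resume() are meant to bracket the system-suspend path; the actual call sites presumably live in the rk3368 PMU driver added elsewhere in this change, so the ordering below is only an illustration:

/* Illustrative suspend/resume bracketing (not code from this file): */
soc_sleep_config();	/* gate clocks and park the PLLs in slow/bypass mode */
/* ... enter the low-power state ... */
pm_plls_resume();	/* restore the saved PLL mode bits after wakeup */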
void __dead2 soc_sys_global_soft_reset(void)
{
	uint32_t temp_val;

	mmio_write_32(CRU_BASE + PLL_CONS((GPLL_ID), 3), PLL_SLOW_BITS);
	mmio_write_32(CRU_BASE + PLL_CONS((CPLL_ID), 3), PLL_SLOW_BITS);
	mmio_write_32(CRU_BASE + PLL_CONS((NPLL_ID), 3), PLL_SLOW_BITS);
	mmio_write_32(CRU_BASE + PLL_CONS((ABPLL_ID), 3), PLL_SLOW_BITS);
	mmio_write_32(CRU_BASE + PLL_CONS((ALPLL_ID), 3), PLL_SLOW_BITS);

	temp_val = mmio_read_32(CRU_BASE + CRU_GLB_RST_CON) |
		   PMU_RST_BY_SECOND_SFT;

	mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, temp_val);
	mmio_write_32(CRU_BASE + CRU_GLB_SRST_SND, 0xeca8);

	/*
	 * The hardware may take some time to complete the reset, so spin
	 * here rather than letting the core execute any further code.
	 */
	while (1)
		;
}