Commit 67748e48 authored by danh-arm's avatar danh-arm Committed by GitHub
Browse files

Merge pull request #788 from jeenu-arm/cpuops-framework

Add provision to extend CPU operations at more levels
parents 9acdafbc 5dd9dbb5
...@@ -1127,7 +1127,8 @@ can be found in the [cpu-specific-build-macros.md][CPUBM] file. ...@@ -1127,7 +1127,8 @@ can be found in the [cpu-specific-build-macros.md][CPUBM] file.
The CPU specific operations framework depends on the `cpu_ops` structure which The CPU specific operations framework depends on the `cpu_ops` structure which
needs to be exported for each type of CPU in the platform. It is defined in needs to be exported for each type of CPU in the platform. It is defined in
`include/lib/cpus/aarch64/cpu_macros.S` and has the following fields : `midr`, `include/lib/cpus/aarch64/cpu_macros.S` and has the following fields : `midr`,
`reset_func()`, `core_pwr_dwn()`, `cluster_pwr_dwn()` and `cpu_reg_dump()`. `reset_func()`, `cpu_pwr_down_ops` (array of power down functions) and
`cpu_reg_dump()`.
The CPU specific files in `lib/cpus` export a `cpu_ops` data structure with The CPU specific files in `lib/cpus` export a `cpu_ops` data structure with
suitable handlers for that CPU. For example, `lib/cpus/aarch64/cortex_a53.S` suitable handlers for that CPU. For example, `lib/cpus/aarch64/cortex_a53.S`
...@@ -1161,15 +1162,15 @@ During the BL31 initialization sequence, the pointer to the matching `cpu_ops` ...@@ -1161,15 +1162,15 @@ During the BL31 initialization sequence, the pointer to the matching `cpu_ops`
entry is stored in per-CPU data by `init_cpu_ops()` so that it can be quickly entry is stored in per-CPU data by `init_cpu_ops()` so that it can be quickly
retrieved during power down sequences. retrieved during power down sequences.
The PSCI service, upon receiving a power down request, determines the highest Various CPU drivers register handlers to perform power down at certain power
power level at which to execute power down sequence for a particular CPU and levels for that specific CPU. The PSCI service, upon receiving a power down
invokes the corresponding 'prepare' power down handler in the CPU specific request, determines the highest power level at which to execute power down
operations framework. For example, when a CPU executes a power down for power sequence for a particular CPU. It uses the `prepare_cpu_pwr_dwn()` function to
level 0, the `prepare_core_pwr_dwn()` retrieves the `cpu_ops` pointer from the pick the right power down handler for the requested level. The function
per-CPU data and the corresponding `core_pwr_dwn()` is invoked. Similarly when retrieves `cpu_ops` pointer member of per-CPU data, and from that, further
a CPU executes power down at power level 1, the `prepare_cluster_pwr_dwn()` retrieves `cpu_pwr_down_ops` array, and indexes into the required level. If the
retrieves the `cpu_ops` pointer and the corresponding `cluster_pwr_dwn()` is requested power level is higher than what a CPU driver supports, the handler
invoked. registered for highest level is invoked.
At runtime the platform hooks for power down are invoked by the PSCI service to At runtime the platform hooks for power down are invoked by the PSCI service to
perform platform specific operations during a power down sequence, for example perform platform specific operations during a power down sequence, for example
......
...@@ -35,6 +35,15 @@ ...@@ -35,6 +35,15 @@
#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \ #define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
(MIDR_PN_MASK << MIDR_PN_SHIFT) (MIDR_PN_MASK << MIDR_PN_SHIFT)
/* The number of CPU operations allowed */
#define CPU_MAX_PWR_DWN_OPS 2
/* Special constant to specify that CPU has no reset function */
#define CPU_NO_RESET_FUNC 0
/* Word size for 32-bit CPUs */
#define CPU_WORD_SIZE 4
/* /*
* Define the offsets to the fields in cpu_ops structure. * Define the offsets to the fields in cpu_ops structure.
*/ */
...@@ -47,33 +56,86 @@ CPU_RESET_FUNC: /* cpu_ops reset_func */ ...@@ -47,33 +56,86 @@ CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 4 .space 4
#endif #endif
#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */ #if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */ CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
.space 4 .space (4 * CPU_MAX_PWR_DWN_OPS)
CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
.space 4
#endif #endif
CPU_OPS_SIZE = . CPU_OPS_SIZE = .
/* /*
* Convenience macro to declare cpu_ops structure. * Write given expressions as words
* Make sure the structure fields are as per the offsets *
* defined above. * _count:
* Write at least _count words. If the given number of expressions
* is less than _count, repeat the last expression to fill _count
* words in total
* _rest:
* Optional list of expressions. _this is for parameter extraction
* only, and has no significance to the caller
*
* Invoked as:
* fill_constants 2, foo, bar, blah, ...
*/
.macro fill_constants _count:req, _this, _rest:vararg
.ifgt \_count
/* Write the current expression */
.ifb \_this
.error "Nothing to fill"
.endif
.word \_this
/* Invoke recursively for remaining expressions */
.ifnb \_rest
fill_constants \_count-1, \_rest
.else
fill_constants \_count-1, \_this
.endif
.endif
.endm
/*
* Declare CPU operations
*
* _name:
* Name of the CPU for which operations are being specified
* _midr:
* Numeric value expected to read from CPU's MIDR
* _resetfunc:
* Reset function for the CPU. If there's no CPU reset function,
* specify CPU_NO_RESET_FUNC
* _power_down_ops:
* Comma-separated list of functions to perform power-down
 *	operations on the CPU. At least one, and up to
* CPU_MAX_PWR_DWN_OPS number of functions may be specified.
* Starting at power level 0, these functions shall handle power
* down at subsequent power levels. If there aren't exactly
* CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
* used to handle power down at subsequent levels
*/ */
.macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0 .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
_power_down_ops:vararg
.section cpu_ops, "a" .section cpu_ops, "a"
.align 2 .align 2
.type cpu_ops_\_name, %object .type cpu_ops_\_name, %object
.word \_midr .word \_midr
#if IMAGE_BL1 || IMAGE_BL32 #if IMAGE_BL1 || IMAGE_BL32
.if \_noresetfunc .word \_resetfunc
.word 0
.else
.word \_name\()_reset_func
.endif
#endif #endif
#if IMAGE_BL32 #if IMAGE_BL32
.word \_name\()_core_pwr_dwn 1:
.word \_name\()_cluster_pwr_dwn /* Insert list of functions */
fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
2:
/*
* Error if no or more than CPU_MAX_PWR_DWN_OPS were specified in the
* list
*/
.ifeq 2b - 1b
.error "At least one power down function must be specified"
.else
.iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
.error "More than CPU_MAX_PWR_DWN_OPS functions specified"
.endif
.endif
#endif #endif
.endm .endm
......
/* /*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -35,6 +35,15 @@ ...@@ -35,6 +35,15 @@
#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \ #define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
(MIDR_PN_MASK << MIDR_PN_SHIFT) (MIDR_PN_MASK << MIDR_PN_SHIFT)
/* The number of CPU operations allowed */
#define CPU_MAX_PWR_DWN_OPS 2
/* Special constant to specify that CPU has no reset function */
#define CPU_NO_RESET_FUNC 0
/* Word size for 64-bit CPUs */
#define CPU_WORD_SIZE 8
/* /*
* Define the offsets to the fields in cpu_ops structure. * Define the offsets to the fields in cpu_ops structure.
*/ */
...@@ -47,10 +56,8 @@ CPU_RESET_FUNC: /* cpu_ops reset_func */ ...@@ -47,10 +56,8 @@ CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 8 .space 8
#endif #endif
#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */ #if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */ CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
.space 8 .space (8 * CPU_MAX_PWR_DWN_OPS)
CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
.space 8
#endif #endif
#if (IMAGE_BL31 && CRASH_REPORTING) #if (IMAGE_BL31 && CRASH_REPORTING)
CPU_REG_DUMP: /* cpu specific register dump for crash reporting */ CPU_REG_DUMP: /* cpu specific register dump for crash reporting */
...@@ -59,24 +66,80 @@ CPU_REG_DUMP: /* cpu specific register dump for crash reporting */ ...@@ -59,24 +66,80 @@ CPU_REG_DUMP: /* cpu specific register dump for crash reporting */
CPU_OPS_SIZE = . CPU_OPS_SIZE = .
/* /*
* Convenience macro to declare cpu_ops structure. * Write given expressions as quad words
* Make sure the structure fields are as per the offsets *
* defined above. * _count:
* Write at least _count quad words. If the given number of
* expressions is less than _count, repeat the last expression to
* fill _count quad words in total
* _rest:
* Optional list of expressions. _this is for parameter extraction
* only, and has no significance to the caller
*
* Invoked as:
* fill_constants 2, foo, bar, blah, ...
*/
.macro fill_constants _count:req, _this, _rest:vararg
.ifgt \_count
/* Write the current expression */
.ifb \_this
.error "Nothing to fill"
.endif
.quad \_this
/* Invoke recursively for remaining expressions */
.ifnb \_rest
fill_constants \_count-1, \_rest
.else
fill_constants \_count-1, \_this
.endif
.endif
.endm
/*
* Declare CPU operations
*
* _name:
* Name of the CPU for which operations are being specified
* _midr:
* Numeric value expected to read from CPU's MIDR
* _resetfunc:
* Reset function for the CPU. If there's no CPU reset function,
* specify CPU_NO_RESET_FUNC
* _power_down_ops:
* Comma-separated list of functions to perform power-down
 *	operations on the CPU. At least one, and up to
* CPU_MAX_PWR_DWN_OPS number of functions may be specified.
* Starting at power level 0, these functions shall handle power
* down at subsequent power levels. If there aren't exactly
* CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
* used to handle power down at subsequent levels
*/ */
.macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0 .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
.section cpu_ops, "a"; .align 3 _power_down_ops:vararg
.section cpu_ops, "a"
.align 3
.type cpu_ops_\_name, %object .type cpu_ops_\_name, %object
.quad \_midr .quad \_midr
#if IMAGE_BL1 || IMAGE_BL31 #if IMAGE_BL1 || IMAGE_BL31
.if \_noresetfunc .quad \_resetfunc
.quad 0
.else
.quad \_name\()_reset_func
.endif
#endif #endif
#if IMAGE_BL31 #if IMAGE_BL31
.quad \_name\()_core_pwr_dwn 1:
.quad \_name\()_cluster_pwr_dwn /* Insert list of functions */
fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
2:
/*
* Error if no or more than CPU_MAX_PWR_DWN_OPS were specified in the
* list
*/
.ifeq 2b - 1b
.error "At least one power down function must be specified"
.else
.iflt 2b - 1b - (CPU_MAX_PWR_DWN_OPS * CPU_WORD_SIZE)
.error "More than CPU_MAX_PWR_DWN_OPS functions specified"
.endif
.endif
#endif #endif
#if (IMAGE_BL31 && CRASH_REPORTING) #if (IMAGE_BL31 && CRASH_REPORTING)
.quad \_name\()_cpu_reg_dump .quad \_name\()_cpu_reg_dump
......
...@@ -65,4 +65,6 @@ func aem_generic_cluster_pwr_dwn ...@@ -65,4 +65,6 @@ func aem_generic_cluster_pwr_dwn
endfunc aem_generic_cluster_pwr_dwn endfunc aem_generic_cluster_pwr_dwn
/* cpu_ops for Base AEM FVP */ /* cpu_ops for Base AEM FVP */
declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
aem_generic_core_pwr_dwn, \
aem_generic_cluster_pwr_dwn
...@@ -141,4 +141,7 @@ func cortex_a32_cluster_pwr_dwn ...@@ -141,4 +141,7 @@ func cortex_a32_cluster_pwr_dwn
b cortex_a32_disable_smp b cortex_a32_disable_smp
endfunc cortex_a32_cluster_pwr_dwn endfunc cortex_a32_cluster_pwr_dwn
declare_cpu_ops cortex_a32, CORTEX_A32_MIDR declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
cortex_a32_reset_func, \
cortex_a32_core_pwr_dwn, \
cortex_a32_cluster_pwr_dwn
...@@ -70,50 +70,39 @@ endfunc reset_handler ...@@ -70,50 +70,39 @@ endfunc reset_handler
#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */ #if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
/* /*
* The prepare core power down function for all platforms. After * void prepare_cpu_pwr_dwn(unsigned int power_level)
* the cpu_ops pointer is retrieved from cpu_data, the corresponding *
* pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS. * Prepare CPU power down function for all platforms. The function takes
* a domain level to be powered down as its parameter. After the cpu_ops
* pointer is retrieved from cpu_data, the handler for requested power
* level is called.
*/ */
.globl prepare_core_pwr_dwn .globl prepare_cpu_pwr_dwn
func prepare_core_pwr_dwn func prepare_cpu_pwr_dwn
/* r12 is pushed to meet the 8 byte stack alignment requirement */
push {r12, lr}
bl _cpu_data
pop {r12, lr}
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
cmp r1, #0
ASM_ASSERT(ne)
#endif
/* Get the cpu_ops core_pwr_dwn handler */
ldr r0, [r1, #CPU_PWR_DWN_CORE]
bx r0
endfunc prepare_core_pwr_dwn
/* /*
* The prepare cluster power down function for all platforms. After * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
* the cpu_ops pointer is retrieved from cpu_data, the corresponding * power down handler for the last power level
* pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
*/ */
.globl prepare_cluster_pwr_dwn mov r2, #(CPU_MAX_PWR_DWN_OPS - 1)
func prepare_cluster_pwr_dwn cmp r0, r2
/* r12 is pushed to meet the 8 byte stack alignment requirement */ movhi r0, r2
push {r12, lr}
push {r0, lr}
bl _cpu_data bl _cpu_data
pop {r12, lr} pop {r2, lr}
ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION #if ASM_ASSERTION
cmp r1, #0 cmp r0, #0
ASM_ASSERT(ne) ASM_ASSERT(ne)
#endif #endif
/* Get the cpu_ops cluster_pwr_dwn handler */ /* Get the appropriate power down handler */
ldr r0, [r1, #CPU_PWR_DWN_CLUSTER] mov r1, #CPU_PWR_DWN_OPS
bx r0 add r1, r1, r2, lsl #2
endfunc prepare_cluster_pwr_dwn ldr r1, [r0, r1]
bx r1
endfunc prepare_cpu_pwr_dwn
/* /*
* Initializes the cpu_ops_ptr if not already initialized * Initializes the cpu_ops_ptr if not already initialized
......
...@@ -90,7 +90,11 @@ endfunc aem_generic_cpu_reg_dump ...@@ -90,7 +90,11 @@ endfunc aem_generic_cpu_reg_dump
/* cpu_ops for Base AEM FVP */ /* cpu_ops for Base AEM FVP */
declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
aem_generic_core_pwr_dwn, \
aem_generic_cluster_pwr_dwn
/* cpu_ops for Foundation FVP */ /* cpu_ops for Foundation FVP */
declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1 declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
aem_generic_core_pwr_dwn, \
aem_generic_cluster_pwr_dwn
...@@ -157,4 +157,7 @@ func cortex_a35_cpu_reg_dump ...@@ -157,4 +157,7 @@ func cortex_a35_cpu_reg_dump
ret ret
endfunc cortex_a35_cpu_reg_dump endfunc cortex_a35_cpu_reg_dump
declare_cpu_ops cortex_a35, CORTEX_A35_MIDR declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \
cortex_a35_reset_func, \
cortex_a35_core_pwr_dwn, \
cortex_a35_cluster_pwr_dwn
...@@ -244,4 +244,7 @@ func cortex_a53_cpu_reg_dump ...@@ -244,4 +244,7 @@ func cortex_a53_cpu_reg_dump
ret ret
endfunc cortex_a53_cpu_reg_dump endfunc cortex_a53_cpu_reg_dump
declare_cpu_ops cortex_a53, CORTEX_A53_MIDR declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
cortex_a53_reset_func, \
cortex_a53_core_pwr_dwn, \
cortex_a53_cluster_pwr_dwn
...@@ -488,4 +488,7 @@ func cortex_a57_cpu_reg_dump ...@@ -488,4 +488,7 @@ func cortex_a57_cpu_reg_dump
endfunc cortex_a57_cpu_reg_dump endfunc cortex_a57_cpu_reg_dump
declare_cpu_ops cortex_a57, CORTEX_A57_MIDR declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
cortex_a57_reset_func, \
cortex_a57_core_pwr_dwn, \
cortex_a57_cluster_pwr_dwn
...@@ -242,4 +242,7 @@ func cortex_a72_cpu_reg_dump ...@@ -242,4 +242,7 @@ func cortex_a72_cpu_reg_dump
endfunc cortex_a72_cpu_reg_dump endfunc cortex_a72_cpu_reg_dump
declare_cpu_ops cortex_a72, CORTEX_A72_MIDR declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
cortex_a72_reset_func, \
cortex_a72_core_pwr_dwn, \
cortex_a72_cluster_pwr_dwn
...@@ -153,4 +153,7 @@ func cortex_a73_cpu_reg_dump ...@@ -153,4 +153,7 @@ func cortex_a73_cpu_reg_dump
ret ret
endfunc cortex_a73_cpu_reg_dump endfunc cortex_a73_cpu_reg_dump
declare_cpu_ops cortex_a73, CORTEX_A73_MIDR declare_cpu_ops cortex_a73, CORTEX_A73_MIDR, \
cortex_a73_reset_func, \
cortex_a73_core_pwr_dwn, \
cortex_a73_cluster_pwr_dwn
...@@ -74,31 +74,23 @@ endfunc reset_handler ...@@ -74,31 +74,23 @@ endfunc reset_handler
#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */ #if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
/* /*
* The prepare core power down function for all platforms. After * void prepare_cpu_pwr_dwn(unsigned int power_level)
* the cpu_ops pointer is retrieved from cpu_data, the corresponding *
* pwr_dwn_core in the cpu_ops is invoked. * Prepare CPU power down function for all platforms. The function takes
* a domain level to be powered down as its parameter. After the cpu_ops
* pointer is retrieved from cpu_data, the handler for requested power
* level is called.
*/ */
.globl prepare_core_pwr_dwn .globl prepare_cpu_pwr_dwn
func prepare_core_pwr_dwn func prepare_cpu_pwr_dwn
mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
cmp x0, #0
ASM_ASSERT(ne)
#endif
/* Get the cpu_ops core_pwr_dwn handler */
ldr x1, [x0, #CPU_PWR_DWN_CORE]
br x1
endfunc prepare_core_pwr_dwn
/* /*
* The prepare cluster power down function for all platforms. After * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
* the cpu_ops pointer is retrieved from cpu_data, the corresponding * power down handler for the last power level
* pwr_dwn_cluster in the cpu_ops is invoked.
*/ */
.globl prepare_cluster_pwr_dwn mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1)
func prepare_cluster_pwr_dwn cmp x0, x2
csel x2, x2, x0, hi
mrs x1, tpidr_el3 mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR] ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION #if ASM_ASSERTION
...@@ -106,10 +98,12 @@ func prepare_cluster_pwr_dwn ...@@ -106,10 +98,12 @@ func prepare_cluster_pwr_dwn
ASM_ASSERT(ne) ASM_ASSERT(ne)
#endif #endif
/* Get the cpu_ops cluster_pwr_dwn handler */ /* Get the appropriate power down handler */
ldr x1, [x0, #CPU_PWR_DWN_CLUSTER] mov x1, #CPU_PWR_DWN_OPS
add x1, x1, x2, lsl #3
ldr x1, [x0, x1]
br x1 br x1
endfunc prepare_cluster_pwr_dwn endfunc prepare_cpu_pwr_dwn
/* /*
......
...@@ -163,4 +163,7 @@ func denver_cpu_reg_dump ...@@ -163,4 +163,7 @@ func denver_cpu_reg_dump
ret ret
endfunc denver_cpu_reg_dump endfunc denver_cpu_reg_dump
declare_cpu_ops denver, DENVER_1_0_MIDR declare_cpu_ops denver, DENVER_1_0_MIDR, \
denver_reset_func, \
denver_core_pwr_dwn, \
denver_cluster_pwr_dwn
...@@ -65,22 +65,13 @@ func psci_do_pwrdown_cache_maintenance ...@@ -65,22 +65,13 @@ func psci_do_pwrdown_cache_maintenance
bl do_stack_maintenance bl do_stack_maintenance
/* --------------------------------------------- /* ---------------------------------------------
	 * Determine how many levels of cache will be	 * Invoke CPU-specific power down operations for
* subject to cache maintenance. Power level * the appropriate level
* 0 implies that only the cpu is being powered
* down. Only the L1 data cache needs to be
* flushed to the PoU in this case. For a higher
* power level we are assuming that a flush
* of L1 data and L2 unified cache is enough.
* This information should be provided by the
* platform.
* --------------------------------------------- * ---------------------------------------------
*/ */
cmp r4, #PSCI_CPU_PWR_LVL mov r0, r4
pop {r4,lr} pop {r4, lr}
b prepare_cpu_pwr_dwn
beq prepare_core_pwr_dwn
b prepare_cluster_pwr_dwn
endfunc psci_do_pwrdown_cache_maintenance endfunc psci_do_pwrdown_cache_maintenance
......
...@@ -59,24 +59,11 @@ func psci_do_pwrdown_cache_maintenance ...@@ -59,24 +59,11 @@ func psci_do_pwrdown_cache_maintenance
stp x19, x20, [sp,#-16]! stp x19, x20, [sp,#-16]!
/* --------------------------------------------- /* ---------------------------------------------
* Determine to how many levels of cache will be * Invoke CPU-specific power down operations for
* subject to cache maintenance. Power level * the appropriate level
* 0 implies that only the cpu is being powered
* down. Only the L1 data cache needs to be
* flushed to the PoU in this case. For a higher
* power level we are assuming that a flush
* of L1 data and L2 unified cache is enough.
* This information should be provided by the
* platform.
* --------------------------------------------- * ---------------------------------------------
*/ */
cmp w0, #PSCI_CPU_PWR_LVL bl prepare_cpu_pwr_dwn
b.eq do_core_pwr_dwn
bl prepare_cluster_pwr_dwn
b do_stack_maintenance
do_core_pwr_dwn:
bl prepare_core_pwr_dwn
/* --------------------------------------------- /* ---------------------------------------------
* Do stack maintenance by flushing the used * Do stack maintenance by flushing the used
...@@ -84,7 +71,6 @@ do_core_pwr_dwn: ...@@ -84,7 +71,6 @@ do_core_pwr_dwn:
* remainder. * remainder.
* --------------------------------------------- * ---------------------------------------------
*/ */
do_stack_maintenance:
bl plat_get_my_stack bl plat_get_my_stack
/* --------------------------------------------- /* ---------------------------------------------
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment