Commit ee7b35c4 authored by Andrew Thoelke, committed by Vikram Kanigiri

Re-design bakery lock memory allocation and algorithm

This patch unifies the bakery lock APIs across the coherent and normal
memory implementations by using the same data type `bakery_lock_t` and
similar function arguments.

A separate section, `bakery_lock`, has been created, and memory for bakery
locks is allocated in it using `DEFINE_BAKERY_LOCK`. When locks are
allocated in normal memory, each lock must be spread across multiple cache
lines, one per core. The compiler allocates a single core's locks in their
own cache line(s) at compile time; the memory for the remaining cores is
then reserved at link time by multiplying the single core's lock size by
(PLATFORM_CORE_COUNT - 1). The normal memory lock algorithm now uses the
lock address instead of an `id` into the per-CPU data. Locks allocated in
coherent memory move from the `tzfw_coherent_mem` section to the
`bakery_lock` section.

The bakery locks are allocated either in .bss or in coherent memory,
depending on whether coherent memory is used. Both regions are initialised
to zero during runtime initialisation, before any lock is used. Hence
bakery_lock_init() has been made an empty function, as the lock memory is
already zero-initialised.

The above design led to the PSCI bakery locks being moved out of
non_cpu_power_pd_node and into a separate psci_locks array.

NOTE: THE BAKERY LOCK API WHEN USE_COHERENT_MEM IS NOT SET HAS CHANGED.
THIS IS A BREAKING CHANGE FOR ALL PLATFORM PORTS THAT ALLOCATE BAKERY
LOCKS IN NORMAL MEMORY.
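
As an illustration of the API change only (a sketch; `plat_lock`,
`PLAT_LOCK_ID` and `PLAT_LOCK_OFFSET` are hypothetical names, not part of
this patch):

    /* Before this patch, with USE_COHERENT_MEM=0, a lock was identified by
     * an id plus the offset of a platform bakery_info array inside cpu_data
     * (names below are illustrative). */
    bakery_lock_get(PLAT_LOCK_ID, PLAT_LOCK_OFFSET);
    bakery_lock_release(PLAT_LOCK_ID, PLAT_LOCK_OFFSET);

    /* After this patch, the lock is a named object placed in the
     * `bakery_lock` section and is always passed by address. */
    DEFINE_BAKERY_LOCK(plat_lock);
    bakery_lock_get(&plat_lock);
    bakery_lock_release(&plat_lock);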

Change-Id: Ic3751c0066b8032dcbf9d88f1d4dc73d15f61d8b
parent 604d5da6
@@ -101,10 +101,31 @@ SECTIONS
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
-    .bss : ALIGN(16) {
+    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
+#if !USE_COHERENT_MEM
+        /*
+         * Bakery locks are stored in normal .bss memory
+         *
+         * Each lock's data is spread across multiple cache lines, one per CPU,
+         * but multiple locks can share the same cache line.
+         * The compiler will allocate enough memory for one CPU's bakery locks,
+         * the remaining cache lines are allocated by the linker script
+         */
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __BAKERY_LOCK_START__ = .;
+        *(bakery_lock)
+        . = ALIGN(CACHE_WRITEBACK_GRANULE);
+        __PERCPU_BAKERY_LOCK_SIZE__ = . - __BAKERY_LOCK_START__;
+        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+        __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+           "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
        __BSS_END__ = .;
    } >RAM
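
To make the sizing performed by this linker fragment concrete, here is an
illustrative calculation written as C-style defines; all numbers (cache
granule, core count, number of locks) are assumptions for the example, not
values from this patch:

    /* Assumed platform values, for illustration only. */
    #define CACHE_WRITEBACK_GRANULE    64  /* assumed cache line size */
    #define PLATFORM_CORE_COUNT         4  /* assumed number of cores */
    #define NUM_LOCKS                   3  /* assumed locks placed in `bakery_lock` */

    /* One CPU's share: NUM_LOCKS * sizeof(bakery_info_t) = 3 * 2 = 6 bytes,
     * rounded up to the cache writeback granule by the ALIGN() directives,
     * giving __PERCPU_BAKERY_LOCK_SIZE__ = 64 bytes. */
    #define PERCPU_BAKERY_LOCK_SIZE_EXAMPLE   CACHE_WRITEBACK_GRANULE

    /* The linker then adds room for the remaining cores:
     * 64 + 64 * (PLATFORM_CORE_COUNT - 1) = 256 bytes for the whole section. */
    #define BAKERY_SECTION_SIZE_EXAMPLE \
            (PERCPU_BAKERY_LOCK_SIZE_EXAMPLE * PLATFORM_CORE_COUNT)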
@@ -126,6 +147,12 @@ SECTIONS
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
+        /*
+         * Bakery locks are stored in coherent memory
+         *
+         * Each lock's data is contiguous and fully allocated by the compiler
+         */
+        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
...
@@ -1523,38 +1523,52 @@ approach described above.
The below sections analyze the data structures allocated in the coherent memory
region and the changes required to allocate them in normal memory.

-### PSCI Affinity map nodes
+### Coherent memory usage in PSCI implementation

-The `psci_aff_map` data structure stores the hierarchical node information for
-each affinity level in the system including the PSCI states associated with them.
-By default, this data structure is allocated in the coherent memory region in
-the Trusted Firmware because it can be accessed by multiple CPUs, either with
-their caches enabled or disabled.
+The `psci_non_cpu_pd_nodes` data structure stores the platform's power domain
+tree information for state management of power domains. By default, this data
+structure is allocated in the coherent memory region in the Trusted Firmware
+because it can be accessed by multiple CPUs, either with caches enabled or
+disabled.

-    typedef struct aff_map_node {
-        unsigned long mpidr;
-        unsigned char ref_count;
-        unsigned char state;
-        unsigned char level;
-    #if USE_COHERENT_MEM
-        bakery_lock_t lock;
-    #else
-        unsigned char aff_map_index;
-    #endif
-    } aff_map_node_t;
+    typedef struct non_cpu_pwr_domain_node {
+        /*
+         * Index of the first CPU power domain node level 0 which has this node
+         * as its parent.
+         */
+        unsigned int cpu_start_idx;
+
+        /*
+         * Number of CPU power domains which are siblings of the domain indexed
+         * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+         * -> cpu_start_idx + ncpus' have this node as their parent.
+         */
+        unsigned int ncpus;
+
+        /*
+         * Index of the parent power domain node.
+         * TODO: Figure out whether using a pointer is more efficient.
+         */
+        unsigned int parent_node;
+
+        plat_local_state_t local_state;
+
+        unsigned char level;
+
+        /* For indexing the psci_lock array */
+        unsigned char lock_index;
+    } non_cpu_pd_node_t;

In order to move this data structure to normal memory, the use of each of its
-fields must be analyzed. Fields like `mpidr` and `level` are only written once
-during cold boot. Hence removing them from coherent memory involves only doing
-a clean and invalidate of the cache lines after these fields are written.
+fields must be analyzed. Fields like `cpu_start_idx`, `ncpus`, `parent_node`,
+`level` and `lock_index` are only written once during cold boot. Hence removing
+them from coherent memory involves only doing a clean and invalidate of the
+cache lines after these fields are written.

-The fields `state` and `ref_count` can be concurrently accessed by multiple
-CPUs in different cache states. A Lamport's Bakery lock is used to ensure mutual
-exclusion to these fields. As a result, it is possible to move these fields out
-of coherent memory by performing software cache maintenance on them. The field
-`lock` is the bakery lock data structure when `USE_COHERENT_MEM` is enabled.
-The `aff_map_index` is used to identify the bakery lock when `USE_COHERENT_MEM`
-is disabled.
+The field `local_state` can be concurrently accessed by multiple CPUs in
+different cache states. A Lamport's Bakery lock `psci_locks` is used to ensure
+mutual exclusion to this field, and a clean and invalidate is needed after it
+is written.
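
A sketch of the access pattern described above (the wrapper function below is
hypothetical; `flush_dcache_range()` and the `psci_lock_*` macros are the ones
used elsewhere in this patch):

    /* Hypothetical helper: update a non-CPU power domain's local state in
     * normal memory and make the update visible to observers whose caches
     * may be disabled. */
    static void psci_set_non_cpu_pd_state(non_cpu_pd_node_t *node,
                                          plat_local_state_t state)
    {
        psci_lock_get(node);                 /* serialise concurrent writers */
        node->local_state = state;
        flush_dcache_range((uintptr_t)&node->local_state,
                           sizeof(node->local_state)); /* clean & invalidate */
        psci_lock_release(node);
    }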
### Bakery lock data

@@ -1563,9 +1577,13 @@ and is accessed by multiple CPUs with mismatched attributes. `bakery_lock_t` is
defined as follows:

    typedef struct bakery_lock {
-        int owner;
-        volatile char entering[BAKERY_LOCK_MAX_CPUS];
-        volatile unsigned number[BAKERY_LOCK_MAX_CPUS];
+        /*
+         * The lock_data is a bit-field of 2 members:
+         * Bit[0]       : choosing. This field is set when the CPU is
+         *                choosing its bakery number.
+         * Bits[1 - 15] : number. This is the bakery number allocated.
+         */
+        volatile uint16_t lock_data[BAKERY_LOCK_MAX_CPUS];
    } bakery_lock_t;

It is a characteristic of Lamport's Bakery algorithm that the volatile per-CPU
@@ -1589,17 +1607,14 @@ the update made by CPU0 as well.

To use bakery locks when `USE_COHERENT_MEM` is disabled, the lock data structure
has been redesigned. The changes utilise the characteristic of Lamport's Bakery
-algorithm mentioned earlier. The per-CPU fields of the new lock structure are
-aligned such that they are allocated on separate cache lines. The per-CPU data
-framework in Trusted Firmware is used to achieve this. This enables software to
+algorithm mentioned earlier. The bakery_lock structure only allocates the memory
+for a single CPU. The macro `DEFINE_BAKERY_LOCK` allocates all the bakery locks
+needed for a CPU into a section `bakery_lock`. The linker allocates the memory
+for the other cores by using the total size allocated for the bakery_lock section
+and multiplying it by (PLATFORM_CORE_COUNT - 1). This enables software to
perform software cache maintenance on the lock data structure without running
into coherency issues associated with mismatched attributes.

-The per-CPU data framework enables consolidation of data structures on the
-fewest cache lines possible. This saves memory as compared to the scenario where
-each data structure is separately aligned to the cache line boundary to achieve
-the same effect.

The bakery lock data structure `bakery_info_t` is defined for use when
`USE_COHERENT_MEM` is disabled as follows:
@@ -1615,12 +1630,10 @@ The bakery lock data structure `bakery_info_t` is defined for use when

The `bakery_info_t` represents a single per-CPU field of one lock and
the combination of corresponding `bakery_info_t` structures for all CPUs in the
-system represents the complete bakery lock. It is embedded in the per-CPU
-data framework `cpu_data` as shown below:
+system represents the complete bakery lock. The view in memory for a system
+with n bakery locks is:

-    CPU0 cpu_data
-    ------------------
-    | ....           |
+    bakery_lock section start
    |----------------|
    | `bakery_info_t`| <-- Lock_0 per-CPU field
    |    Lock_0      |     for CPU0

@@ -1633,12 +1646,11 @@ data framework `cpu_data` as shown below:
    | `bakery_info_t`| <-- Lock_N per-CPU field
    |    Lock_N      |     for CPU0
    ------------------
-    CPU1 cpu_data
-    ------------------
-    | ....           |
-    |----------------|
+    |    XXXXX       |
+    | Padding to     |
+    | next Cache WB  | <--- Calculate PERCPU_BAKERY_LOCK_SIZE, allocate
+    |    Granule     |      continuous memory for remaining CPUs.
+    ------------------
    | `bakery_info_t`| <-- Lock_0 per-CPU field
    |    Lock_0      |     for CPU1
    |----------------|

@@ -1650,14 +1662,20 @@ data framework `cpu_data` as shown below:
    | `bakery_info_t`| <-- Lock_N per-CPU field
    |    Lock_N      |     for CPU1
    ------------------
+    |    XXXXX       |
+    | Padding to     |
+    | next Cache WB  |
+    |    Granule     |
+    ------------------
Consider a system of 2 CPUs with 'N' bakery locks as shown above. For an
operation on Lock_N, the corresponding `bakery_info_t` in both CPU0's and CPU1's
-`cpu_data` need to be fetched and appropriate cache operations need to be
-performed for each access.
+`bakery_lock` region need to be fetched and appropriate cache operations need
+to be performed for each access.

+On ARM platforms, bakery locks are used in PSCI (`psci_locks`) and in the power
+controller driver (`arm_lock`).

-For multiple bakery locks, an array of `bakery_info_t` is declared in `cpu_data`
-and each lock is given an `id` to identify it in the array.
### Non Functional Impact of removing coherent memory

@@ -1680,10 +1698,9 @@ Juno ARM development platform.

As mentioned earlier, almost a page of memory can be saved by disabling
`USE_COHERENT_MEM`. Each platform needs to consider these trade-offs to decide
whether coherent memory should be used. If a platform disables
-`USE_COHERENT_MEM` and needs to use bakery locks in the porting layer, it should
-reserve memory in `cpu_data` by defining the macro `PLAT_PCPU_DATA_SIZE` (see
-the [Porting Guide]). Refer to the reference platform code for examples.
+`USE_COHERENT_MEM` and needs to use bakery locks in the porting layer, it can
+optionally define the macro `PLAT_PERCPU_BAKERY_LOCK_SIZE` (see the [Porting
+Guide]). Refer to the reference platform code for examples.

12. Code Structure
-------------------
...
@@ -76,21 +76,24 @@ mapped page tables, and enable both the instruction and data caches for each BL
stage. In ARM standard platforms, each BL stage configures the MMU in
the platform-specific architecture setup function, `blX_plat_arch_setup()`.

-If the build option `USE_COHERENT_MEM` is enabled, each platform must allocate a
+If the build option `USE_COHERENT_MEM` is enabled, each platform can allocate a
block of identity mapped secure memory with Device-nGnRE attributes aligned to
-page boundary (4K) for each BL stage. This memory is identified by the section
-name `tzfw_coherent_mem` so that it is possible for the firmware to place
-variables in it using the following C code directive:
+page boundary (4K) for each BL stage. All sections which allocate coherent
+memory are grouped under `coherent_ram`. For example, bakery locks are placed
+in a section identified by the name `bakery_lock` inside `coherent_ram` so that
+it is possible for the firmware to place variables in it using the following C
+code directive:

-    __attribute__ ((section("tzfw_coherent_mem")))
+    __attribute__ ((section("bakery_lock")))

Or alternatively the following assembler code directive:

-    .section tzfw_coherent_mem
+    .section bakery_lock

-The `tzfw_coherent_mem` section is used to allocate any data structures that are
-accessed both when a CPU is executing with its MMU and caches enabled, and when
-it's running with its MMU and caches disabled. Examples are given below.
+The `coherent_ram` section is a sum of all sections like `bakery_lock` which are
+used to allocate any data structures that are accessed both when a CPU is
+executing with its MMU and caches enabled, and when it's running with its MMU
+and caches disabled. Examples are given below.

The following variables, functions and constants must be defined by the platform
for the firmware to work correctly.
@@ -1150,6 +1153,12 @@ of the system counter, which is retrieved from the first entry in the frequency
modes table.

+* **#define : PLAT_PERCPU_BAKERY_LOCK_SIZE** [optional]
+
+   This is used when bakery locks are allocated in normal memory. It defines the
+   memory (in bytes) to be allocated for one CPU's bakery locks and must be a
+   multiple of the cache line size.

3.3 Power State Coordination Interface (in BL3-1)
------------------------------------------------
...
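
A sketch of how a platform might provide this definition in its
`platform_def.h`; the value shown is illustrative only, and each platform must
size it for the locks it actually defines:

    /* platform_def.h (illustrative) */

    /* Reserve two cache writeback granules per CPU for bakery locks. The
     * linker ASSERT() will fail the build if this does not match the space
     * the locks actually occupy. */
    #define PLAT_PERCPU_BAKERY_LOCK_SIZE    (2 * CACHE_WRITEBACK_GRANULE)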
@@ -251,9 +251,6 @@ typedef struct psci_cpu_data {

	/* The local power state of this CPU */
	plat_local_state_t local_state;

-#if !USE_COHERENT_MEM
-	bakery_info_t pcpu_bakery_info[PSCI_NUM_NON_CPU_PWR_DOMAINS];
-#endif
} psci_cpu_data_t;

/*******************************************************************************
...
@@ -56,6 +56,11 @@
 * External bakery lock interface.
 ****************************************************************************/
#if USE_COHERENT_MEM
+/*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */

typedef struct bakery_lock {
	/*
@@ -67,12 +72,15 @@ typedef struct bakery_lock {
	volatile uint16_t lock_data[BAKERY_LOCK_MAX_CPUS];
} bakery_lock_t;

-void bakery_lock_init(bakery_lock_t *bakery);
-void bakery_lock_get(bakery_lock_t *bakery);
-void bakery_lock_release(bakery_lock_t *bakery);
-int bakery_lock_try(bakery_lock_t *bakery);

#else
+/*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+ * The compiler will allocate enough memory for one CPU's bakery locks,
+ * the remaining cache lines are allocated by the linker script
+ */

typedef struct bakery_info {
	/*
@@ -84,9 +92,19 @@ typedef struct bakery_info {
	volatile uint16_t lock_data;
} bakery_info_t;

-void bakery_lock_get(unsigned int id, unsigned int offset);
-void bakery_lock_release(unsigned int id, unsigned int offset);
+typedef bakery_info_t bakery_lock_t;

#endif /* __USE_COHERENT_MEM__ */

+inline void bakery_lock_init(bakery_lock_t *bakery) {}
+void bakery_lock_get(bakery_lock_t *bakery);
+void bakery_lock_release(bakery_lock_t *bakery);
+
+#define DEFINE_BAKERY_LOCK(_name) bakery_lock_t _name \
+	__attribute__ ((section("bakery_lock")))
+
+#define DECLARE_BAKERY_LOCK(_name) extern bakery_lock_t _name
+
#endif /* __ASSEMBLY__ */
#endif /* __BAKERY_LOCK_H__ */
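
A minimal usage sketch of the macros and prototypes above (the lock name
`plat_console_lock` and the function around it are hypothetical):

    /* In one C file: place the per-CPU lock data in the `bakery_lock` section. */
    DEFINE_BAKERY_LOCK(plat_console_lock);

    /* In a shared header, so other files can reference the same lock. */
    DECLARE_BAKERY_LOCK(plat_console_lock);

    void plat_console_puts(const char *s)
    {
        bakery_lock_get(&plat_console_lock);
        /* ... critical section: drive the shared console ... */
        bakery_lock_release(&plat_console_lock);
    }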
@@ -63,16 +63,6 @@
		assert(entry < BAKERY_LOCK_MAX_CPUS);	\
	} while (0)

-/* Initialize Bakery Lock to reset all ticket values */
-void bakery_lock_init(bakery_lock_t *bakery)
-{
-	assert(bakery);
-
-	/* All ticket values need to be 0 */
-	memset(bakery, 0, sizeof(*bakery));
-}
-
/* Obtain a ticket for a given CPU */
static unsigned int bakery_get_ticket(bakery_lock_t *bakery, unsigned int me)
{
...
@@ -56,12 +56,29 @@
 * accesses regardless of status of address translation.
 */

-/* This macro assumes that the bakery_info array is located at the offset specified */
-#define get_my_bakery_info(offset, id)		\
-	(((bakery_info_t *) (((uint8_t *)_cpu_data()) + offset)) + id)
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+/*
+ * Verify that the platform defined value for the per-cpu space for bakery locks is
+ * a multiple of the cache line size, to prevent multiple CPUs writing to the same
+ * bakery lock cache line
+ *
+ * Using this value, if provided, rather than the linker generated value results in
+ * more efficient code
+ */
+CASSERT((PLAT_PERCPU_BAKERY_LOCK_SIZE & (CACHE_WRITEBACK_GRANULE - 1)) == 0, \
+	PLAT_PERCPU_BAKERY_LOCK_SIZE_not_cacheline_multiple);
+#define PERCPU_BAKERY_LOCK_SIZE (PLAT_PERCPU_BAKERY_LOCK_SIZE)
+#else
+/*
+ * Use the linker defined symbol which has evaluated the size requirement.
+ * This is not as efficient as using a platform defined constant
+ */
+extern void *__PERCPU_BAKERY_LOCK_SIZE__;
+#define PERCPU_BAKERY_LOCK_SIZE ((uintptr_t)&__PERCPU_BAKERY_LOCK_SIZE__)
+#endif

-#define get_bakery_info_by_index(offset, id, ix)	\
-	(((bakery_info_t *) (((uint8_t *)_cpu_data_by_index(ix)) + offset)) + id)
+#define get_bakery_info(cpu_ix, lock)	\
+	(bakery_info_t *)((uintptr_t)lock + cpu_ix * PERCPU_BAKERY_LOCK_SIZE)

#define write_cache_op(addr, cached)	\
	do {	\
@@ -73,7 +90,7 @@
#define read_cache_op(addr, cached)	if (cached)	\
					dccivac((uint64_t)addr)

-static unsigned int bakery_get_ticket(int id, unsigned int offset,
+static unsigned int bakery_get_ticket(bakery_lock_t *lock,
						unsigned int me, int is_cached)
{
	unsigned int my_ticket, their_ticket;
@@ -84,7 +101,7 @@ static unsigned int bakery_get_ticket(int id, unsigned int offset,
	 * Obtain a reference to the bakery information for this cpu and ensure
	 * it is not NULL.
	 */
-	my_bakery_info = get_my_bakery_info(offset, id);
+	my_bakery_info = get_bakery_info(me, lock);
	assert(my_bakery_info);

	/*
@@ -115,7 +132,7 @@ static unsigned int bakery_get_ticket(int id, unsigned int offset,
		 * Get a reference to the other contender's bakery info and
		 * ensure that a stale copy is not read.
		 */
-		their_bakery_info = get_bakery_info_by_index(offset, id, they);
+		their_bakery_info = get_bakery_info(they, lock);
		assert(their_bakery_info);

		read_cache_op(their_bakery_info, is_cached);
@@ -141,7 +158,7 @@ static unsigned int bakery_get_ticket(int id, unsigned int offset,
	return my_ticket;
}

-void bakery_lock_get(unsigned int id, unsigned int offset)
+void bakery_lock_get(bakery_lock_t *lock)
{
	unsigned int they, me, is_cached;
	unsigned int my_ticket, my_prio, their_ticket;
@@ -153,7 +170,7 @@ void bakery_lock_get(unsigned int id, unsigned int offset)
	is_cached = read_sctlr_el3() & SCTLR_C_BIT;

	/* Get a ticket */
-	my_ticket = bakery_get_ticket(id, offset, me, is_cached);
+	my_ticket = bakery_get_ticket(lock, me, is_cached);

	/*
	 * Now that we got our ticket, compute our priority value, then compare
@@ -168,7 +185,7 @@ void bakery_lock_get(unsigned int id, unsigned int offset)
		 * Get a reference to the other contender's bakery info and
		 * ensure that a stale copy is not read.
		 */
-		their_bakery_info = get_bakery_info_by_index(offset, id, they);
+		their_bakery_info = get_bakery_info(they, lock);
		assert(their_bakery_info);

		/* Wait for the contender to get their ticket */
@@ -199,12 +216,12 @@ void bakery_lock_get(unsigned int id, unsigned int offset)
	/* Lock acquired */
}

-void bakery_lock_release(unsigned int id, unsigned int offset)
+void bakery_lock_release(bakery_lock_t *lock)
{
	bakery_info_t *my_bakery_info;
	unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;

-	my_bakery_info = get_my_bakery_info(offset, id);
+	my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
	assert(bakery_ticket_number(my_bakery_info->lock_data));

	my_bakery_info->lock_data = 0;
...
@@ -78,6 +78,8 @@ __attribute__ ((section("tzfw_coherent_mem")))
#endif
;

+DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
...
@@ -42,23 +42,12 @@
 * The following helper macros abstract the interface to the Bakery
 * Lock API.
 */
-#if USE_COHERENT_MEM
-#define psci_lock_init(non_cpu_pd_node, idx)	\
-	bakery_lock_init(&(non_cpu_pd_node)[(idx)].lock)
-#define psci_lock_get(non_cpu_pd_node)		\
-	bakery_lock_get(&((non_cpu_pd_node)->lock))
-#define psci_lock_release(non_cpu_pd_node)	\
-	bakery_lock_release(&((non_cpu_pd_node)->lock))
-#else
#define psci_lock_init(non_cpu_pd_node, idx)	\
	((non_cpu_pd_node)[(idx)].lock_index = (idx))
#define psci_lock_get(non_cpu_pd_node)		\
-	bakery_lock_get((non_cpu_pd_node)->lock_index,	\
-			CPU_DATA_PSCI_LOCK_OFFSET)
+	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)	\
-	bakery_lock_release((non_cpu_pd_node)->lock_index,	\
-			CPU_DATA_PSCI_LOCK_OFFSET)
-#endif
+	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])

/*
 * The PSCI capability which are provided by the generic code but does not
@@ -140,12 +129,9 @@ typedef struct non_cpu_pwr_domain_node {
	plat_local_state_t local_state;

	unsigned char level;

-#if USE_COHERENT_MEM
-	bakery_lock_t lock;
-#else
-	/* For indexing the bakery_info array in per CPU data */
+	/* For indexing the psci_lock array */
	unsigned char lock_index;
-#endif
} non_cpu_pd_node_t;

typedef struct cpu_pwr_domain_node {
@@ -174,6 +160,9 @@ extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;

+/* One bakery lock is required for each non-cpu power domain */
+DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
/*******************************************************************************
 * SPD's power management hooks registered with PSCI
 ******************************************************************************/
...
@@ -181,12 +181,6 @@ static void populate_power_domain_tree(const unsigned char *topology)

	/* Validate the sanity of array exported by the platform */
	assert(j == PLATFORM_CORE_COUNT);

-#if !USE_COHERENT_MEM
-	/* Flush the non CPU power domain data to memory */
-	flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
-			   sizeof(psci_non_cpu_pd_nodes));
-#endif
}

/*******************************************************************************
...