Commit 6de8b24f authored by davidcunado-arm, committed by GitHub

Merge pull request #953 from vwadekar/tegra-misra-fixes-v1

Tegra misra fixes v1
parents 0dc3c353 ab712fd8
......@@ -15,9 +15,9 @@
* ---------------------------------------------
*/
func cortex_a53_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A53_ECTLR
bic64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A53_ECTLR
isb
dsb sy
bx lr
......@@ -32,9 +32,9 @@ func cortex_a53_reset_func
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A53_ECTLR
orr64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A53_ECTLR
isb
bx lr
endfunc cortex_a53_reset_func
......
......@@ -16,9 +16,9 @@
* ---------------------------------------------
*/
func cortex_a57_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A57_ECTLR
bic64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A57_ECTLR
bx lr
endfunc cortex_a57_disable_smp
......@@ -28,11 +28,11 @@ endfunc cortex_a57_disable_smp
* ---------------------------------------------
*/
func cortex_a57_disable_l2_prefetch
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
CPUECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A57_ECTLR
orr64_imm r0, r1, CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK | \
CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CORTEX_A57_ECTLR
isb
dsb ish
bx lr
......@@ -59,9 +59,9 @@ func cortex_a57_reset_func
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A57_ECTLR
orr64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A57_ECTLR
isb
bx lr
endfunc cortex_a57_reset_func
......
......@@ -15,11 +15,11 @@
* ---------------------------------------------
*/
func cortex_a72_disable_l2_prefetch
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
CPUECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A72_ECTLR
orr64_imm r0, r1, CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
bic64_imm r0, r1, (CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK | \
CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK)
stcopr16 r0, r1, CORTEX_A72_ECTLR
isb
bx lr
endfunc cortex_a72_disable_l2_prefetch
......@@ -29,9 +29,9 @@ endfunc cortex_a72_disable_l2_prefetch
* ---------------------------------------------
*/
func cortex_a72_disable_hw_prefetcher
ldcopr16 r0, r1, CPUACTLR
orr64_imm r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
stcopr16 r0, r1, CPUACTLR
ldcopr16 r0, r1, CORTEX_A72_ACTLR
orr64_imm r0, r1, CORTEX_A72_ACTLR_DISABLE_L1_DCACHE_HW_PFTCH
stcopr16 r0, r1, CORTEX_A72_ACTLR
isb
dsb ish
bx lr
......@@ -43,9 +43,9 @@ endfunc cortex_a72_disable_hw_prefetcher
* ---------------------------------------------
*/
func cortex_a72_disable_smp
ldcopr16 r0, r1, CPUECTLR
bic64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A72_ECTLR
bic64_imm r0, r1, CORTEX_A72_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A72_ECTLR
bx lr
endfunc cortex_a72_disable_smp
......@@ -70,9 +70,9 @@ func cortex_a72_reset_func
* Enable the SMP bit.
* ---------------------------------------------
*/
ldcopr16 r0, r1, CPUECTLR
orr64_imm r0, r1, CPUECTLR_SMP_BIT
stcopr16 r0, r1, CPUECTLR
ldcopr16 r0, r1, CORTEX_A72_ECTLR
orr64_imm r0, r1, CORTEX_A72_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A72_ECTLR
isb
bx lr
endfunc cortex_a72_reset_func
......
......@@ -33,9 +33,9 @@ endfunc cortex_a53_disable_dcache
* ---------------------------------------------
*/
func cortex_a53_disable_smp
mrs x0, CPUECTLR_EL1
bic x0, x0, #CPUECTLR_SMP_BIT
msr CPUECTLR_EL1, x0
mrs x0, CORTEX_A53_ECTLR_EL1
bic x0, x0, #CORTEX_A53_ECTLR_SMP_BIT
msr CORTEX_A53_ECTLR_EL1, x0
isb
dsb sy
ret
......@@ -56,10 +56,10 @@ func errata_a53_826319_wa
mov x17, x30
bl check_errata_826319
cbz x0, 1f
mrs x1, L2ACTLR_EL1
bic x1, x1, #L2ACTLR_ENABLE_UNIQUECLEAN
orr x1, x1, #L2ACTLR_DISABLE_CLEAN_PUSH
msr L2ACTLR_EL1, x1
mrs x1, CORTEX_A53_L2ACTLR_EL1
bic x1, x1, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
orr x1, x1, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
msr CORTEX_A53_L2ACTLR_EL1, x1
1:
ret x17
endfunc errata_a53_826319_wa
......@@ -93,9 +93,9 @@ func a53_disable_non_temporal_hint
mov x17, x30
bl check_errata_disable_non_temporal_hint
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_DTAH
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A53_ACTLR_EL1
orr x1, x1, #CORTEX_A53_ACTLR_DTAH
msr CORTEX_A53_ACTLR_EL1, x1
1:
ret x17
endfunc a53_disable_non_temporal_hint
......@@ -126,9 +126,9 @@ func errata_a53_855873_wa
bl check_errata_855873
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_ENDCCASCI
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A53_ACTLR_EL1
orr x1, x1, #CORTEX_A53_ACTLR_ENDCCASCI
msr CORTEX_A53_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a53_855873_wa
......@@ -168,9 +168,9 @@ func cortex_a53_reset_func
* Enable the SMP bit.
* ---------------------------------------------
*/
mrs x0, CPUECTLR_EL1
orr x0, x0, #CPUECTLR_SMP_BIT
msr CPUECTLR_EL1, x0
mrs x0, CORTEX_A53_ECTLR_EL1
orr x0, x0, #CORTEX_A53_ECTLR_SMP_BIT
msr CORTEX_A53_ECTLR_EL1, x0
isb
ret x19
endfunc cortex_a53_reset_func
......@@ -275,10 +275,10 @@ cortex_a53_regs: /* The ascii list of register names to be reported */
func cortex_a53_cpu_reg_dump
adr x6, cortex_a53_regs
mrs x8, CPUECTLR_EL1
mrs x9, CPUMERRSR_EL1
mrs x10, L2MERRSR_EL1
mrs x11, CPUACTLR_EL1
mrs x8, CORTEX_A53_ECTLR_EL1
mrs x9, CORTEX_A53_MERRSR_EL1
mrs x10, CORTEX_A53_L2MERRSR_EL1
mrs x11, CORTEX_A53_ACTLR_EL1
ret
endfunc cortex_a53_cpu_reg_dump
......
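Note: the assembly hunks above are a mechanical rename, replacing the generic CPUECTLR/CPUACTLR macro names with CPU-prefixed ones. A minimal C sketch of the motivation follows; the bit values are placeholders for illustration, not the real register encodings from the TF-A headers.

#include <stdint.h>

/* Placeholder values; the real headers define the system register
 * encodings and bit positions per CPU. */
#define CORTEX_A53_ECTLR_SMP_BIT	(1ULL << 6)
#define CORTEX_A57_ECTLR_SMP_BIT	(1ULL << 6)

/* With unprefixed names such as CPUECTLR_SMP_BIT, cortex_a53.h and
 * cortex_a57.h could not be included in the same translation unit without
 * redefinitions; the per-CPU prefix makes every macro unambiguous. */
static uint64_t a53_enable_smp(uint64_t ectlr)
{
	return ectlr | CORTEX_A53_ECTLR_SMP_BIT;
}

static uint64_t a57_enable_smp(uint64_t ectlr)
{
	return ectlr | CORTEX_A57_ECTLR_SMP_BIT;
}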
......@@ -29,12 +29,12 @@ endfunc cortex_a57_disable_dcache
* ---------------------------------------------
*/
func cortex_a57_disable_l2_prefetch
mrs x0, CPUECTLR_EL1
orr x0, x0, #CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
mov x1, #CPUECTLR_L2_IPFTCH_DIST_MASK
orr x1, x1, #CPUECTLR_L2_DPFTCH_DIST_MASK
mrs x0, CORTEX_A57_ECTLR_EL1
orr x0, x0, #CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT
mov x1, #CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK
orr x1, x1, #CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK
bic x0, x0, x1
msr CPUECTLR_EL1, x0
msr CORTEX_A57_ECTLR_EL1, x0
isb
dsb ish
ret
......@@ -45,9 +45,9 @@ endfunc cortex_a57_disable_l2_prefetch
* ---------------------------------------------
*/
func cortex_a57_disable_smp
mrs x0, CPUECTLR_EL1
bic x0, x0, #CPUECTLR_SMP_BIT
msr CPUECTLR_EL1, x0
mrs x0, CORTEX_A57_ECTLR_EL1
bic x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
msr CORTEX_A57_ECTLR_EL1, x0
ret
endfunc cortex_a57_disable_smp
......@@ -78,9 +78,9 @@ func errata_a57_806969_wa
mov x17, x30
bl check_errata_806969
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_NO_ALLOC_WBWA
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_NO_ALLOC_WBWA
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_806969_wa
......@@ -120,9 +120,9 @@ func errata_a57_813420_wa
mov x17, x30
bl check_errata_813420
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_DCC_AS_DCCI
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_DCC_AS_DCCI
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_813420_wa
......@@ -150,9 +150,9 @@ func a57_disable_ldnp_overread
mov x17, x30
bl check_errata_disable_ldnp_overread
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_DIS_OVERREAD
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_DIS_OVERREAD
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc a57_disable_ldnp_overread
......@@ -177,9 +177,9 @@ func errata_a57_826974_wa
mov x17, x30
bl check_errata_826974
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_DIS_LOAD_PASS_DMB
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_DIS_LOAD_PASS_DMB
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_826974_wa
......@@ -204,9 +204,9 @@ func errata_a57_826977_wa
mov x17, x30
bl check_errata_826977
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_GRE_NGRE_AS_NGNRE
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_GRE_NGRE_AS_NGNRE
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_826977_wa
......@@ -231,15 +231,16 @@ func errata_a57_828024_wa
mov x17, x30
bl check_errata_828024
cbz x0, 1f
mrs x1, CPUACTLR_EL1
mrs x1, CORTEX_A57_ACTLR_EL1
/*
* Setting the relevant bits in CPUACTLR_EL1 has to be done in 2
* instructions here because the resulting bitmask doesn't fit in a
* 16-bit value so it cannot be encoded in a single instruction.
*/
orr x1, x1, #CPUACTLR_NO_ALLOC_WBWA
orr x1, x1, #(CPUACTLR_DIS_L1_STREAMING | CPUACTLR_DIS_STREAMING)
msr CPUACTLR_EL1, x1
orr x1, x1, #CORTEX_A57_ACTLR_NO_ALLOC_WBWA
orr x1, x1, #(CORTEX_A57_ACTLR_DIS_L1_STREAMING | \
CORTEX_A57_ACTLR_DIS_STREAMING)
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_828024_wa
......@@ -264,9 +265,9 @@ func errata_a57_829520_wa
mov x17, x30
bl check_errata_829520
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_DIS_INDIRECT_PREDICTOR
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_DIS_INDIRECT_PREDICTOR
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_829520_wa
......@@ -291,9 +292,9 @@ func errata_a57_833471_wa
mov x17, x30
bl check_errata_833471
cbz x0, 1f
mrs x1, CPUACTLR_EL1
orr x1, x1, #CPUACTLR_FORCE_FPSCR_FLUSH
msr CPUACTLR_EL1, x1
mrs x1, CORTEX_A57_ACTLR_EL1
orr x1, x1, #CORTEX_A57_ACTLR_FORCE_FPSCR_FLUSH
msr CORTEX_A57_ACTLR_EL1, x1
1:
ret x17
endfunc errata_a57_833471_wa
......@@ -357,9 +358,9 @@ func cortex_a57_reset_func
* Enable the SMP bit.
* ---------------------------------------------
*/
mrs x0, CPUECTLR_EL1
orr x0, x0, #CPUECTLR_SMP_BIT
msr CPUECTLR_EL1, x0
mrs x0, CORTEX_A57_ECTLR_EL1
orr x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
msr CORTEX_A57_ECTLR_EL1, x0
isb
ret x19
endfunc cortex_a57_reset_func
......@@ -503,9 +504,9 @@ cortex_a57_regs: /* The ascii list of register names to be reported */
func cortex_a57_cpu_reg_dump
adr x6, cortex_a57_regs
mrs x8, CPUECTLR_EL1
mrs x9, CPUMERRSR_EL1
mrs x10, L2MERRSR_EL1
mrs x8, CORTEX_A57_ECTLR_EL1
mrs x9, CORTEX_A57_MERRSR_EL1
mrs x10, CORTEX_A57_L2MERRSR_EL1
ret
endfunc cortex_a57_cpu_reg_dump
......
......@@ -27,12 +27,12 @@ endfunc cortex_a72_disable_dcache
* ---------------------------------------------
*/
func cortex_a72_disable_l2_prefetch
mrs x0, CPUECTLR_EL1
orr x0, x0, #CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
mov x1, #CPUECTLR_L2_IPFTCH_DIST_MASK
orr x1, x1, #CPUECTLR_L2_DPFTCH_DIST_MASK
mrs x0, CORTEX_A72_ECTLR_EL1
orr x0, x0, #CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT
mov x1, #CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK
orr x1, x1, #CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK
bic x0, x0, x1
msr CPUECTLR_EL1, x0
msr CORTEX_A72_ECTLR_EL1, x0
isb
ret
endfunc cortex_a72_disable_l2_prefetch
......@@ -42,9 +42,9 @@ endfunc cortex_a72_disable_l2_prefetch
* ---------------------------------------------
*/
func cortex_a72_disable_hw_prefetcher
mrs x0, CPUACTLR_EL1
orr x0, x0, #CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
msr CPUACTLR_EL1, x0
mrs x0, CORTEX_A72_ACTLR_EL1
orr x0, x0, #CORTEX_A72_ACTLR_DISABLE_L1_DCACHE_HW_PFTCH
msr CORTEX_A72_ACTLR_EL1, x0
isb
dsb ish
ret
......@@ -55,9 +55,9 @@ endfunc cortex_a72_disable_hw_prefetcher
* ---------------------------------------------
*/
func cortex_a72_disable_smp
mrs x0, CPUECTLR_EL1
bic x0, x0, #CPUECTLR_SMP_BIT
msr CPUECTLR_EL1, x0
mrs x0, CORTEX_A72_ECTLR_EL1
bic x0, x0, #CORTEX_A72_ECTLR_SMP_BIT
msr CORTEX_A72_ECTLR_EL1, x0
ret
endfunc cortex_a72_disable_smp
......@@ -82,9 +82,9 @@ func cortex_a72_reset_func
* As a bare minimum enable the SMP bit.
* ---------------------------------------------
*/
mrs x0, CPUECTLR_EL1
orr x0, x0, #CPUECTLR_SMP_BIT
msr CPUECTLR_EL1, x0
mrs x0, CORTEX_A72_ECTLR_EL1
orr x0, x0, #CORTEX_A72_ECTLR_SMP_BIT
msr CORTEX_A72_ECTLR_EL1, x0
isb
ret
endfunc cortex_a72_reset_func
......@@ -211,9 +211,9 @@ cortex_a72_regs: /* The ascii list of register names to be reported */
func cortex_a72_cpu_reg_dump
adr x6, cortex_a72_regs
mrs x8, CPUECTLR_EL1
mrs x9, CPUMERRSR_EL1
mrs x10, L2MERRSR_EL1
mrs x8, CORTEX_A72_ECTLR_EL1
mrs x9, CORTEX_A72_MERRSR_EL1
mrs x10, CORTEX_A72_L2MERRSR_EL1
ret
endfunc cortex_a72_cpu_reg_dump
......
......@@ -60,7 +60,7 @@ int errata_needs_reporting(spinlock_t *lock, uint32_t *reported)
* Applied: INFO
* Not applied: VERBOSE
*/
void errata_print_msg(int status, const char *cpu, const char *id)
void errata_print_msg(unsigned int status, const char *cpu, const char *id)
{
/* Errata status strings */
static const char *const errata_status_str[] = {
......
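The signature change from int to unsigned int is one of the MISRA essential-type fixes: the status value indexes the errata_status_str array, so an unsigned parameter matches the unsigned array bound. A minimal sketch of that usage, with names local to this example rather than the TF-A implementation:

#include <stdio.h>

static const char *const errata_status_str[] = {
	"missing", "applied", "not applied"
};

static void print_status(unsigned int status, const char *cpu, const char *id)
{
	/* unsigned index compared against an unsigned (size_t) bound */
	if (status < (sizeof(errata_status_str) / sizeof(errata_status_str[0]))) {
		printf("errata %s on %s: %s\n", id, cpu, errata_status_str[status]);
	}
}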
......@@ -332,7 +332,7 @@ void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int node_index[])
{
unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
int i;
unsigned int i;
for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
*node_index++ = parent_node;
......@@ -901,7 +901,7 @@ void psci_print_power_domain_map(void)
*****************************************************************************/
int psci_secondaries_brought_up(void)
{
int idx, n_valid = 0;
unsigned int idx, n_valid = 0;
for (idx = 0; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
......
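The counter changes in psci_get_parent_pwr_domain_nodes and psci_secondaries_brought_up follow the same rule: ARRAY_SIZE() and the power-level macros are unsigned, so the loop counters compared against them should be unsigned too. A self-contained sketch of the pattern, not the PSCI code itself:

#include <stddef.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static unsigned long nodes[8];

static int secondaries_brought_up(void)
{
	unsigned int idx, n_valid = 0U;

	/* idx is unsigned, matching the unsigned result of ARRAY_SIZE() */
	for (idx = 0U; idx < ARRAY_SIZE(nodes); idx++) {
		if (nodes[idx] != 0UL) {
			n_valid++;
		}
	}

	return (n_valid > 1U) ? 1 : 0;
}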
......@@ -209,7 +209,7 @@ int psci_cpu_off(void)
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level)
{
unsigned int target_idx;
int target_idx;
/* We dont support level higher than PSCI_CPU_PWR_LVL */
if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
......
......@@ -19,7 +19,7 @@
******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
int lvl;
unsigned int lvl;
for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
......
......@@ -37,7 +37,7 @@
*/
static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
{
for (int i = 0; i < ctx->tables_num; i++)
for (unsigned int i = 0; i < ctx->tables_num; i++)
if (ctx->tables[i] == table)
return i;
......@@ -53,7 +53,7 @@ static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
for (int i = 0; i < ctx->tables_num; i++)
for (unsigned int i = 0; i < ctx->tables_num; i++)
if (ctx->tables_mapped_regions[i] == 0)
return ctx->tables[i];
......@@ -203,7 +203,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
const int table_entries,
const int level)
const unsigned int level)
{
assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
......@@ -468,7 +468,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
const int table_entries,
const int level)
const unsigned int level)
{
assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
......@@ -1053,14 +1053,14 @@ void init_xlation_table(xlat_ctx_t *ctx)
/* All tables must be zeroed before mapping any region. */
for (int i = 0; i < ctx->base_table_entries; i++)
for (unsigned int i = 0; i < ctx->base_table_entries; i++)
ctx->base_table[i] = INVALID_DESC;
for (int j = 0; j < ctx->tables_num; j++) {
for (unsigned int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
ctx->tables_mapped_regions[j] = 0;
#endif
for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
ctx->tables[j][i] = INVALID_DESC;
}
......
......@@ -52,7 +52,7 @@ typedef struct {
* null entry.
*/
mmap_region_t *mmap;
int mmap_num;
unsigned int mmap_num;
/*
* Array of finer-grain translation tables.
......@@ -60,7 +60,7 @@ typedef struct {
* contain both level-2 and level-3 entries.
*/
uint64_t (*tables)[XLAT_TABLE_ENTRIES];
int tables_num;
unsigned int tables_num;
/*
* Keep track of how many regions are mapped in each table. The base
* table can't be unmapped so it isn't needed to keep track of it.
......@@ -69,14 +69,14 @@ typedef struct {
int *tables_mapped_regions;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
int next_table;
unsigned int next_table;
/*
* Base translation table. It doesn't need to have the same amount of
* entries as the ones used for other levels.
*/
uint64_t *base_table;
int base_table_entries;
unsigned int base_table_entries;
/*
* Max Physical and Virtual addresses currently in use by the
......@@ -87,10 +87,10 @@ typedef struct {
uintptr_t max_va;
/* Level of the base translation table. */
int base_level;
unsigned int base_level;
/* Set to 1 when the translation tables are initialized. */
int initialized;
unsigned int initialized;
/*
* Bit mask that has to be ORed to the rest of a translation table
......
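The structure fields and the loops that walk them change type together: once mmap_num, tables_num, base_table_entries and friends are unsigned int, signed counters would reintroduce the signed/unsigned comparisons the diff removes. A reduced sketch of that consistency; the struct here is illustrative, not the real xlat_ctx_t:

#include <stddef.h>
#include <stdint.h>

#define XLAT_TABLE_ENTRIES	512U

struct xlat_ctx_sketch {
	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
	unsigned int tables_num;
};

static uint64_t *get_empty_table(struct xlat_ctx_sketch *ctx,
				 const unsigned int *mapped_regions)
{
	/* counter type matches the unsigned tables_num field */
	for (unsigned int i = 0U; i < ctx->tables_num; i++) {
		if (mapped_regions[i] == 0U) {
			return ctx->tables[i];
		}
	}

	return NULL;
}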
......@@ -86,9 +86,9 @@ func JUNO_HANDLER(0)
* Cortex-A57 specific settings
* --------------------------------------------------------------------
*/
mov x0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(L2_TAG_RAM_LATENCY_3_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT))
msr L2CTLR_EL1, x0
mov x0, #((CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT))
msr CORTEX_A57_L2CTLR_EL1, x0
1:
isb
ret
......@@ -123,8 +123,8 @@ A57:
* Cortex-A57 specific settings
* --------------------------------------------------------------------
*/
mov x0, #(L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT)
msr L2CTLR_EL1, x0
mov x0, #(CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT)
msr CORTEX_A57_L2CTLR_EL1, x0
isb
ret
endfunc JUNO_HANDLER(1)
......@@ -157,9 +157,9 @@ A72:
* Cortex-A72 specific settings
* --------------------------------------------------------------------
*/
mov x0, #((L2_DATA_RAM_LATENCY_3_CYCLES << L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(L2_TAG_RAM_LATENCY_2_CYCLES << L2CTLR_TAG_RAM_LATENCY_SHIFT))
msr L2CTLR_EL1, x0
mov x0, #((CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT) | \
(CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES << CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT))
msr CORTEX_A57_L2CTLR_EL1, x0
isb
ret
endfunc JUNO_HANDLER(2)
......
......@@ -20,11 +20,11 @@ func pm_asm_code
mov x0, 0
msr oslar_el1, x0
mrs x0, CPUACTLR_EL1
bic x0, x0, #(CPUACTLR_RADIS | CPUACTLR_L1RADIS)
mrs x0, CORTEX_A53_ACTLR_EL1
bic x0, x0, #(CORTEX_A53_ACTLR_RADIS | CORTEX_A53_ACTLR_L1RADIS)
orr x0, x0, #0x180000
orr x0, x0, #0xe000
msr CPUACTLR_EL1, x0
msr CORTEX_A53_ACTLR_EL1, x0
mrs x3, actlr_el3
orr x3, x3, #ACTLR_EL3_L2ECTLR_BIT
......
......@@ -68,18 +68,18 @@
* Enable processor retention
* ---------------------------
*/
mrs x0, L2ECTLR_EL1
mov x1, #RETENTION_ENTRY_TICKS_512 << L2ECTLR_RET_CTRL_SHIFT
bic x0, x0, #L2ECTLR_RET_CTRL_MASK
mrs x0, CORTEX_A57_L2ECTLR_EL1
mov x1, #RETENTION_ENTRY_TICKS_512
bic x0, x0, #CORTEX_A57_L2ECTLR_RET_CTRL_MASK
orr x0, x0, x1
msr L2ECTLR_EL1, x0
msr CORTEX_A57_L2ECTLR_EL1, x0
isb
mrs x0, CPUECTLR_EL1
mov x1, #RETENTION_ENTRY_TICKS_512 << CPUECTLR_CPU_RET_CTRL_SHIFT
bic x0, x0, #CPUECTLR_CPU_RET_CTRL_MASK
mrs x0, CORTEX_A57_ECTLR_EL1
mov x1, #RETENTION_ENTRY_TICKS_512
bic x0, x0, #CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK
orr x0, x0, x1
msr CPUECTLR_EL1, x0
msr CORTEX_A57_ECTLR_EL1, x0
isb
/* -------------------------------------------------------
......@@ -98,11 +98,11 @@
adr x0, tegra_enable_l2_ecc_parity_prot
ldr x0, [x0]
cbz x0, 1f
mrs x0, L2CTLR_EL1
and x1, x0, #L2_ECC_PARITY_PROTECTION_BIT
mrs x0, CORTEX_A57_L2CTLR_EL1
and x1, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
cbnz x1, 1f
orr x0, x0, #L2_ECC_PARITY_PROTECTION_BIT
msr L2CTLR_EL1, x0
orr x0, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
msr CORTEX_A57_L2CTLR_EL1, x0
isb
/* --------------------------------
......@@ -317,18 +317,18 @@ func tegra_secure_entrypoint
* entries from the branch predictor array.
* -------------------------------------------------------
*/
mrs x0, CPUACTLR_EL1
mrs x0, CORTEX_A57_ACTLR_EL1
orr x0, x0, #1
msr CPUACTLR_EL1, x0 /* invalidate BTB and I$ together */
msr CORTEX_A57_ACTLR_EL1, x0 /* invalidate BTB and I$ together */
dsb sy
isb
ic iallu /* actual invalidate */
dsb sy
isb
mrs x0, CPUACTLR_EL1
mrs x0, CORTEX_A57_ACTLR_EL1
bic x0, x0, #1
msr CPUACTLR_EL1, X0 /* restore original CPUACTLR_EL1 */
msr CORTEX_A57_ACTLR_EL1, X0 /* restore original CPUACTLR_EL1 */
dsb sy
isb
......@@ -352,7 +352,7 @@ func tegra_secure_entrypoint
msr oslar_el1, x0 /* os lock stays 0 across warm reset */
mov x3, #3
movz x4, #0x8000, lsl #48
msr CPUACTLR_EL1, x4 /* turn off RCG */
msr CORTEX_A57_ACTLR_EL1, x4 /* turn off RCG */
isb
msr rmr_el3, x3 /* request warm reset */
isb
......
......@@ -15,9 +15,6 @@
#include <utils.h>
#include <xlat_tables_v2.h>
#define TEGRA_GPU_RESET_REG_OFFSET 0x28c
#define GPU_RESET_BIT (1 << 24)
/* Video Memory base and size (live values) */
static uint64_t video_mem_base;
static uint64_t video_mem_size;
......@@ -135,20 +132,8 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
uintptr_t vmem_end_old = video_mem_base + (video_mem_size << 20);
uintptr_t vmem_end_new = phys_base + size_in_bytes;
uint32_t regval;
unsigned long long non_overlap_area_size;
/*
* The GPU is the user of the Video Memory region. In order to
* transition to the new memory region smoothly, we program the
* new base/size ONLY if the GPU is in reset mode.
*/
regval = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET);
if ((regval & GPU_RESET_BIT) == 0) {
ERROR("GPU not in reset! Video Memory setup failed\n");
return;
}
/*
* Setup the Memory controller to restrict CPU accesses to the Video
* Memory region
......
......@@ -19,9 +19,6 @@
#include <utils.h>
#include <xlat_tables_v2.h>
#define TEGRA_GPU_RESET_REG_OFFSET 0x30
#define GPU_RESET_BIT (1 << 0)
/* Video Memory base and size (live values) */
static uint64_t video_mem_base;
static uint64_t video_mem_size_mb;
......@@ -254,32 +251,12 @@ static void tegra_memctrl_reconfig_mss_clients(void)
wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
} while ((val & wdata_0) != wdata_0);
/* Wait one more time due to SW WAR for known legacy issue */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
} while ((val & wdata_0) != wdata_0);
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
assert(val == wdata_1);
wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
/* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
} while ((val & wdata_1) != wdata_1);
/* Wait one more time due to SW WAR for known legacy issue */
do {
val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
} while ((val & wdata_1) != wdata_1);
#endif
}
......@@ -623,20 +600,8 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
uintptr_t vmem_end_old = video_mem_base + (video_mem_size_mb << 20);
uintptr_t vmem_end_new = phys_base + size_in_bytes;
uint32_t regval;
unsigned long long non_overlap_area_size;
/*
* The GPU is the user of the Video Memory region. In order to
* transition to the new memory region smoothly, we program the
* new base/size ONLY if the GPU is in reset mode.
*/
regval = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET);
if ((regval & GPU_RESET_BIT) == 0U) {
ERROR("GPU not in reset! Video Memory setup failed\n");
return;
}
/*
* Setup the Memory controller to restrict CPU accesses to the Video
* Memory region
......
......@@ -11,8 +11,10 @@
#include <pmc.h>
#include <tegra_def.h>
#define RESET_ENABLE 0x10U
/* Module IDs used during power ungate procedure */
static const int pmc_cpu_powergate_id[4] = {
static const uint32_t pmc_cpu_powergate_id[4] = {
0, /* CPU 0 */
9, /* CPU 1 */
10, /* CPU 2 */
......@@ -23,7 +25,7 @@ static const int pmc_cpu_powergate_id[4] = {
* Power ungate CPU to start the boot process. CPU reset vectors must be
* populated before calling this function.
******************************************************************************/
void tegra_pmc_cpu_on(int cpu)
void tegra_pmc_cpu_on(int32_t cpu)
{
uint32_t val;
......@@ -31,35 +33,34 @@ void tegra_pmc_cpu_on(int cpu)
* Check if CPU is already power ungated
*/
val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
if (val & (1 << pmc_cpu_powergate_id[cpu]))
return;
/*
* The PMC deasserts the START bit when it starts the power
* ungate process. Loop till no power toggle is in progress.
*/
do {
val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
} while (val & PMC_TOGGLE_START);
/*
* Start the power ungate procedure
*/
val = pmc_cpu_powergate_id[cpu] | PMC_TOGGLE_START;
tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, val);
/*
* The PMC deasserts the START bit when it starts the power
* ungate process. Loop till powergate START bit is asserted.
*/
do {
val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
} while (val & (1 << 8));
/* loop till the CPU is power ungated */
do {
val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
} while ((val & (1 << pmc_cpu_powergate_id[cpu])) == 0);
if ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U) {
/*
* The PMC deasserts the START bit when it starts the power
* ungate process. Loop till no power toggle is in progress.
*/
do {
val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
} while ((val & PMC_TOGGLE_START) != 0U);
/*
* Start the power ungate procedure
*/
val = pmc_cpu_powergate_id[cpu] | PMC_TOGGLE_START;
tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, val);
/*
* The PMC deasserts the START bit when it starts the power
* ungate process. Loop till powergate START bit is asserted.
*/
do {
val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
} while ((val & (1U << 8)) != 0U);
/* loop till the CPU is power ungated */
do {
val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
} while ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U);
}
}
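The rework of tegra_pmc_cpu_on keeps the same power-ungate sequence but wraps it in a single guarded block instead of an early return, and makes every literal and comparison explicitly unsigned. A stand-alone sketch of that polling pattern; pmc_read/pmc_write and the START bit position are assumptions of this example, not the Tegra register layout:

#include <stdint.h>

#define TOGGLE_START	(1U << 8)	/* assumed bit position for illustration */

extern uint32_t pmc_read(uint32_t offset);
extern void pmc_write(uint32_t offset, uint32_t value);

static void cpu_power_ungate(uint32_t id, uint32_t toggle_off, uint32_t status_off)
{
	uint32_t val = pmc_read(status_off);

	if ((val & (1U << id)) == 0U) {
		/* wait until no power toggle is in progress */
		do {
			val = pmc_read(toggle_off);
		} while ((val & TOGGLE_START) != 0U);

		/* start the power ungate procedure */
		pmc_write(toggle_off, id | TOGGLE_START);

		/* wait for the partition to report power ungated */
		do {
			val = pmc_read(status_off);
		} while ((val & (1U << id)) == 0U);
	}
}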
/*******************************************************************************
......@@ -69,9 +70,10 @@ void tegra_pmc_cpu_setup(uint64_t reset_addr)
{
uint32_t val;
tegra_pmc_write_32(PMC_SECURE_SCRATCH34, (reset_addr & 0xFFFFFFFF) | 1);
val = reset_addr >> 32;
tegra_pmc_write_32(PMC_SECURE_SCRATCH35, val & 0x7FF);
tegra_pmc_write_32(PMC_SECURE_SCRATCH34,
((uint32_t)reset_addr & 0xFFFFFFFFU) | 1U);
val = (uint32_t)(reset_addr >> 32U);
tegra_pmc_write_32(PMC_SECURE_SCRATCH35, val & 0x7FFU);
}
/*******************************************************************************
......@@ -101,7 +103,7 @@ __dead2 void tegra_pmc_system_reset(void)
uint32_t reg;
reg = tegra_pmc_read_32(PMC_CONFIG);
reg |= 0x10; /* restart */
reg |= RESET_ENABLE; /* restart */
tegra_pmc_write_32(PMC_CONFIG, reg);
wfi();
......
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -7,23 +7,24 @@
#include <delay_timer.h>
#include <mmio.h>
#include <tegra_def.h>
#include <tegra_private.h>
static uint32_t tegra_timerus_get_value(void)
{
return mmio_read_32(TEGRA_TMRUS_BASE);
}
static const timer_ops_t tegra_timer_ops = {
.get_timer_value = tegra_timerus_get_value,
.clk_mult = 1,
.clk_div = 1,
};
/*
* Initialise the on-chip free rolling us counter as the delay
* timer.
*/
void tegra_delay_timer_init(void)
{
static const timer_ops_t tegra_timer_ops = {
.get_timer_value = tegra_timerus_get_value,
.clk_mult = 1,
.clk_div = 1,
};
timer_init(&tegra_timer_ops);
}
......@@ -18,13 +18,13 @@
#include <tegra_def.h>
#include <tegra_private.h>
DEFINE_BAKERY_LOCK(tegra_fiq_lock);
static DEFINE_BAKERY_LOCK(tegra_fiq_lock);
/*******************************************************************************
* Static variables
******************************************************************************/
static uint64_t ns_fiq_handler_addr;
static unsigned int fiq_handler_active;
static uint32_t fiq_handler_active;
static pcpu_fiq_state_t fiq_state[PLATFORM_CORE_COUNT];
/*******************************************************************************
......@@ -37,7 +37,7 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
{
cpu_context_t *ctx = cm_get_context(NON_SECURE);
el3_state_t *el3state_ctx = get_el3state_ctx(ctx);
int cpu = plat_my_core_pos();
uint32_t cpu = plat_my_core_pos();
uint32_t irq;
bakery_lock_get(&tegra_fiq_lock);
......@@ -52,22 +52,23 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
* Save elr_el3 and spsr_el3 from the saved context, and overwrite
* the context with the NS fiq_handler_addr and SPSR value.
*/
fiq_state[cpu].elr_el3 = read_ctx_reg(el3state_ctx, CTX_ELR_EL3);
fiq_state[cpu].spsr_el3 = read_ctx_reg(el3state_ctx, CTX_SPSR_EL3);
fiq_state[cpu].elr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3));
fiq_state[cpu].spsr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_SPSR_EL3));
/*
* Set the new ELR to continue execution in the NS world using the
* FIQ handler registered earlier.
*/
assert(ns_fiq_handler_addr);
write_ctx_reg(el3state_ctx, CTX_ELR_EL3, ns_fiq_handler_addr);
write_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3), (ns_fiq_handler_addr));
/*
* Mark this interrupt as complete to avoid a FIQ storm.
*/
irq = plat_ic_acknowledge_interrupt();
if (irq < 1022)
if (irq < 1022U) {
plat_ic_end_of_interrupt(irq);
}
bakery_lock_release(&tegra_fiq_lock);
......@@ -79,27 +80,27 @@ static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
******************************************************************************/
void tegra_fiq_handler_setup(void)
{
uint64_t flags;
int rc;
uint32_t flags;
int32_t rc;
/* return if already registered */
if (fiq_handler_active)
return;
/*
* Register an interrupt handler for FIQ interrupts generated for
* NS interrupt sources
*/
flags = 0;
set_interrupt_rm_flag(flags, NON_SECURE);
rc = register_interrupt_type_handler(INTR_TYPE_EL3,
tegra_fiq_interrupt_handler,
flags);
if (rc)
panic();
/* handler is now active */
fiq_handler_active = 1;
if (fiq_handler_active == 0U) {
/*
* Register an interrupt handler for FIQ interrupts generated for
* NS interrupt sources
*/
flags = 0U;
set_interrupt_rm_flag((flags), (NON_SECURE));
rc = register_interrupt_type_handler(INTR_TYPE_EL3,
tegra_fiq_interrupt_handler,
flags);
if (rc != 0) {
panic();
}
/* handler is now active */
fiq_handler_active = 1;
}
}
/*******************************************************************************
......@@ -113,26 +114,26 @@ void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint)
/*******************************************************************************
* Handler to return the NS EL1/EL0 CPU context
******************************************************************************/
int tegra_fiq_get_intr_context(void)
int32_t tegra_fiq_get_intr_context(void)
{
cpu_context_t *ctx = cm_get_context(NON_SECURE);
gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx);
el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx);
int cpu = plat_my_core_pos();
const el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx);
uint32_t cpu = plat_my_core_pos();
uint64_t val;
/*
* We store the ELR_EL3, SPSR_EL3, SP_EL0 and SP_EL1 registers so
* that el3_exit() sends these values back to the NS world.
*/
write_ctx_reg(gpregs_ctx, CTX_GPREG_X0, fiq_state[cpu].elr_el3);
write_ctx_reg(gpregs_ctx, CTX_GPREG_X1, fiq_state[cpu].spsr_el3);
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X0), (fiq_state[cpu].elr_el3));
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X1), (fiq_state[cpu].spsr_el3));
val = read_ctx_reg(gpregs_ctx, CTX_GPREG_SP_EL0);
write_ctx_reg(gpregs_ctx, CTX_GPREG_X2, val);
val = read_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_SP_EL0));
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X2), (val));
val = read_ctx_reg(el1state_ctx, CTX_SP_EL1);
write_ctx_reg(gpregs_ctx, CTX_GPREG_X3, val);
val = read_ctx_reg((el1state_ctx), (uint32_t)(CTX_SP_EL1));
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X3), (val));
return 0;
}
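The extra parentheses and (uint32_t) casts around the read_ctx_reg/write_ctx_reg arguments address MISRA's rules on function-like macros: every argument is parenthesized and every constant offset carries an explicit unsigned type. A toy macro pair showing the shape of that requirement; these are not the real TF-A context macros:

#include <stdint.h>

#define CTX_SKETCH_ELR_EL3	0x8U	/* illustrative offset, in bytes */

/* every use of a macro parameter is wrapped in parentheses */
#define ctx_write(base, offset, value) \
	((((uint64_t *)(base))[(offset) / 8U]) = (value))
#define ctx_read(base, offset) \
	(((uint64_t *)(base))[(offset) / 8U])

static uint64_t swap_elr(uint64_t *ctx, uint64_t new_elr)
{
	uint64_t old_elr = ctx_read((ctx), (uint32_t)(CTX_SKETCH_ELR_EL3));

	ctx_write((ctx), (uint32_t)(CTX_SKETCH_ELR_EL3), (new_elr));

	return old_elr;
}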