/*
2
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif
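
/*
 * Example (hypothetical): invalidate the whole TLB, assuming TLBIALL is the
 * operation encoding provided by arch.h and r0 holds the (ignored) operand.
 *
 *	mov	r0, #0
 *	TLB_INVALIDATE(r0, TLBIALL)
 */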

#define WORD_SIZE	4

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
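
	/*
	 * Example (hypothetical; assumes MPIDR and CNTPCT_64 are the
	 * coprocessor register encodings provided by arch.h):
	 *
	 *	ldcopr	r0, MPIDR		- read MPIDR into r0
	 *	ldcopr16 r0, r1, CNTPCT_64	- read CNTPCT into r1:r0
	 */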

	/* Cache line size helpers */
	.macro	dcache_line_size  reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro	icache_line_size  reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm
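
	/*
	 * Example (hypothetical): fetch the minimum data cache line size in
	 * bytes before a cache maintenance loop; the second register is a
	 * scratch register and is clobbered.
	 *
	 *	dcache_line_size r2, r3
	 */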

	/*
	 * Declare the exception vector table, enforcing that it is aligned on
	 * a 32-byte boundary.
	 */
	.macro vector_base  label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm
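
	/*
	 * Example (hypothetical table and handler names):
	 *
	 *	vector_base my_vector_table
	 *		b	my_reset_handler
	 *		b	my_undef_handler
	 */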

	/*
	 * This macro calculates the base address of the current CPU's
	 * multi-processor (MP) stack using the plat_my_core_pos() index, the
	 * name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm
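
	/*
	 * Example (hypothetical; a platform would pass its own stack array
	 * symbol and per-CPU stack size):
	 *
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 */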

	/*
	 * This macro calculates the base address of a uniprocessor (UP) stack
	 * using the name of the stack storage and the size of the stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm
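
	/*
	 * Example (hypothetical symbol and size):
	 *
	 *	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 */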

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without the Virtualization extension do not support the
	 * eret instruction.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 * Where possible, use the Speculation Barrier instruction introduced
	 * in ARMv8.5.
	 */
	.macro exception_return
	eret
#if ARM_ARCH_AT_LEAST(8, 5)
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
#endif
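
	/*
	 * Example (hypothetical): used in place of a bare eret at the end of
	 * an exception handler so speculation cannot continue past the return.
	 *
	 *	exception_return
	 */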

#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the stl instruction */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif
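
	/*
	 * Example (hypothetical lock release; assumes r0 points to the lock
	 * word): on ARMv8 this is the real stl instruction, while on ARMv7
	 * the macro above provides equivalent ordering.
	 *
	 *	mov	r1, #0
	 *	stl	r1, [r0]
	 */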

	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm
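
	/*
	 * Example (hypothetical values):
	 *
	 *	mov_imm	r0, 0x1000		- single mov, upper half is zero
	 *	mov_imm	r1, 0x80001000		- expands to movw + movt
	 */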

	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to, which
	 * is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that will cause LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger's back trace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
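
	/*
	 * Example (hypothetical target): jump to a function that never
	 * returns.
	 *
	 *	no_ret	plat_panic_handler
	 */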

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
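
	/*
	 * Example (hypothetical lock name):
	 *
	 *	define_asm_spinlock my_lock
	 */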

	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`.  The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared.  Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared.  If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
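
	/*
	 * Example (hypothetical; assumes the 64-bit value is held in r0 (low
	 * word) and r1 (high word)):
	 *
	 *	orr64_imm r0, r1, 0x100000001	- set bit 0 and bit 32
	 *	bic64_imm r0, r1, 0x200000002	- clear bit 1 and bit 33
	 */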

	/*
	 * Helper macro for carrying out division in software when
	 * hardware division is not supported. \top holds the dividend
	 * on entry to the macro and the remainder after it has
	 * executed. \bot holds the divisor. \div holds the quotient
	 * and \temp is a temporary register used in the calculation.
	 * The division algorithm has been obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro	softudiv	div:req,top:req,bot:req,temp:req

	mov     \temp, \bot
	cmp     \temp, \top, lsr #1
div1:
	movls   \temp, \temp, lsl #1
	cmp     \temp, \top, lsr #1
	bls     div1
	mov     \div, #0

div2:
	cmp     \top, \temp
	subcs   \top, \top, \temp
	adc     \div, \div, \div
	mov     \temp, \temp, lsr #1
	cmp     \temp, \bot
	bhs     div2
	.endm
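
	/*
	 * Example (hypothetical register allocation): divide r1 by r2,
	 * leaving the quotient in r0 and the remainder in r1; r3 is used
	 * as scratch.
	 *
	 *	softudiv r0, r1, r2, r3
	 */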
#endif /* ASM_MACROS_S */