/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

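/* -----------------------------------------------------------------------
 * unsigned int get_afflvl_shift(unsigned int afflvl);
 *
 * Return the bit position of the affinity level in x0 within an MPIDR
 * value. Level 3 is first bumped to 4 to skip the non-affinity bits
 * [31:24] of MPIDR_EL1, so with MPIDR_AFFLVL_SHIFT == 3 the results are
 * 0, 8, 16 and 32. The C prototype above is shown for illustration only.
 * -----------------------------------------------------------------------
 */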
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

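/* -----------------------------------------------------------------------
 * unsigned long mpidr_mask_lower_afflvls(unsigned long mpidr,
 *                                        unsigned int afflvl);
 *
 * Zero out the affinity fields in the MPIDR value in x0 that lie below
 * the affinity level in x1. For example, mpidr == 0x0102 (Aff1 == 1,
 * Aff0 == 2) with afflvl == 1 yields 0x0100.
 * The C prototype above is shown for illustration only.
 * -----------------------------------------------------------------------
 */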
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


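/* -----------------------------------------------------------------------
 * Perform an exception return using the ELR_ELx and SPSR_ELx of the
 * current exception level.
 * -----------------------------------------------------------------------
 */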
func eret
	eret
endfunc eret


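/* -----------------------------------------------------------------------
 * Issue an SMC. The function identifier and any arguments are expected
 * to already be in the general purpose registers (typically x0-x7, as
 * per the SMC Calling Convention) when this is called.
 * -----------------------------------------------------------------------
 */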
func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
#if ASM_ASSERTION
	tst	x0, #0xf
	ASM_ASSERT(eq)
#endif
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero any remaining bytes one by one */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:
	ret
endfunc zeromem16
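/* -----------------------------------------------------------------------
 * Illustrative C-level usage of zeromem16. This is only a sketch: the
 * extern declaration and the `scratch' buffer are hypothetical and not
 * part of this file.
 *
 *   extern void zeromem16(void *mem, unsigned int length);
 *
 *   static unsigned char scratch[256] __attribute__((aligned(16)));
 *   zeromem16(scratch, sizeof(scratch));
 * -----------------------------------------------------------------------
 */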


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ASM_ASSERTION
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy any remaining bytes one by one */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
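/* --------------------------------------------------------------------------
 * Illustrative C-level usage of memcpy16. This is only a sketch: the
 * extern declaration and the `src_buf'/`dst_buf' buffers are hypothetical.
 *
 *   extern void memcpy16(void *dest, const void *src, unsigned int length);
 *
 *   static unsigned char src_buf[128] __attribute__((aligned(16)));
 *   static unsigned char dst_buf[128] __attribute__((aligned(16)));
 *
 *   memcpy16(dst_buf, src_buf, sizeof(src_buf));
 * --------------------------------------------------------------------------
 */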

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled, without any intervening
 * cacheable data accesses.
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all
endfunc disable_mmu_el3
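/* ---------------------------------------------------------------------------
 * Note: the final branch above is a tail call. dcsw_op_all performs the
 * clean and invalidate by set/way with the data cache already disabled, and
 * its own `ret' takes control straight back to the caller of
 * disable_mmu_el3. A matching C prototype would simply be:
 *
 *   extern void disable_mmu_el3(void);
 * ---------------------------------------------------------------------------
 */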


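/* ---------------------------------------------------------------------------
 * Disable the MMU, data cache and instruction cache at EL3
 * Same as disable_mmu_el3, except that SCTLR_I_BIT is also cleared so the
 * instruction cache is disabled as well before the clean and invalidate.
 * ---------------------------------------------------------------------------
 */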
func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * The FPEN bits in CPACR_EL1 are set and the TFP bit in CPTR_EL3 is
 * cleared so that FP/SIMD instructions are not trapped.
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
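/* ---------------------------------------------------------------------------
 * Illustrative C-level usage of enable_vfp (a sketch only; the prototype is
 * an assumption and the call is only available in images built with
 * SUPPORT_VFP):
 *
 *   extern void enable_vfp(void);
 *
 *   enable_vfp();
 * ---------------------------------------------------------------------------
 */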
#endif