/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/export.h>
#include <linux/linkage.h>

SYM_FUNC_START(memmove)
/*
 * void *memmove(void *dest_in, const void *src_in, size_t n)
 * -mregparm=3 passes these in registers:
 * dest_in: %eax
 * src_in: %edx
 * n: %ecx
 * See also: arch/x86/entry/calling.h for a description of the calling convention.
 *
 * n can remain in %ecx, but for `rep movsl`, we'll need dest in %edi and src
 * in %esi.
 */
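/*
 * A rough C sketch of the dispatch implemented below (illustrative
 * only, not the exact code):
 *
 *	if (n < 16)
 *		copy via the small-move tail (.Lmove_16B and friends);
 *	else if (src >= dest)
 *		copy forward: the 16-byte loop, or 'rep movsl' for large,
 *		mutually aligned buffers;
 *	else
 *		copy backward: the mirrored loop, or 'rep movsl' with the
 *		direction flag set;
 *
 * Copying backward whenever src < dest makes overlapping moves safe in
 * either direction.
 */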
.set dest_in, %eax
.set dest, %edi
.set src_in, %edx
.set src, %esi
.set n, %ecx
.set tmp0, %edx
.set tmp0w, %dx
.set tmp1, %ebx
.set tmp1w, %bx
.set tmp2, %eax
.set tmp3b, %cl
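/*
 * Note that tmp0 aliases src_in (%edx) and tmp2 aliases dest_in (%eax):
 * the temporaries only become usable once the incoming arguments have
 * been copied to src/dest and the return value has been saved.
 */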

/*
 * Save all callee-saved registers, because this function is going to clobber
 * all of them:
 */
	pushl	%ebp
	movl	%esp, %ebp	// set standard frame pointer

	pushl	%ebx
	pushl	%edi
	pushl	%esi
	pushl	%eax		// save 'dest_in' parameter [eax] as the return value

	movl	src_in, src
	movl	dest_in, dest

	/* Handle moves of 16 bytes or more with the loops below: */
	cmpl	$0x10, n
	jb	.Lmove_16B

	/* Decide forward/backward copy mode */
	cmpl	dest, src
	jb	.Lbackwards_header
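	/*
	 * An illustrative example of why the direction matters: with
	 * src=100, dest=104 and n=8, a forward copy would overwrite
	 * source bytes 104..107 before reading them, so overlapping
	 * moves with src < dest must run from the tail backward.
	 */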

	/*
	 * The 'movs' instruction has a high startup latency, so copies
	 * below the 680-byte threshold are handled with plain register
	 * moves instead.
	 */
	cmpl	$680, n
	jb	.Ltoo_small_forwards
	/* 'rep movsl' only pays off when src and dest are mutually aligned: */
	movl	src, tmp0
	xorl	dest, tmp0
	andl	$0xff, tmp0
	jz	.Lforward_movs
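	/*
	 * Illustrative example of the check above: src=0x1010 and
	 * dest=0x2010 give src ^ dest = 0x3000, and 0x3000 & 0xff == 0:
	 * both pointers share the same low byte, so the dword-wise
	 * 'rep movsl' stays mutually aligned for the whole copy.
	 */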
.Ltoo_small_forwards:
	subl	$0x10, n

	/* We gobble 16 bytes forward per loop iteration. */
.Lmove_16B_forwards_loop:
	subl	$0x10, n
	movl	0*4(src), tmp0
	movl	1*4(src), tmp1
	movl	tmp0, 0*4(dest)
	movl	tmp1, 1*4(dest)
	movl	2*4(src), tmp0
	movl	3*4(src), tmp1
	movl	tmp0, 2*4(dest)
	movl	tmp1, 3*4(dest)
	leal	0x10(src), src
	leal	0x10(dest), dest
	jae	.Lmove_16B_forwards_loop
	addl	$0x10, n
	jmp	.Lmove_16B
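	/*
	 * Worked example of the counter trick above (illustrative,
	 * n=40): the entry subl leaves n=24; iteration 1 leaves n=8
	 * (CF clear, loop again), iteration 2 leaves n=-8 (CF set, fall
	 * through); the final addl restores n=8. The loop moved 32 bytes
	 * and .Lmove_16B finishes the 0..15-byte remainder. This works
	 * because 'movl' and 'leal' preserve the flags set by 'subl',
	 * so the 'jae' still tests the subtraction at the loop top.
	 */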

	/* Copy the data forward with 'rep movsl'. */
.p2align 4
.Lforward_movs:
	movl	-4(src, n), tmp0
	leal	-4(dest, n), tmp1
	shrl	$2, n
	rep	movsl
	movl	tmp0, (tmp1)
	jmp	.Ldone
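	/*
	 * The dword at src+n-4 is loaded before 'rep movsl' runs: the
	 * shift truncates n to whole dwords, and storing the saved dword
	 * at dest+n-4 afterwards covers the 1..3 trailing bytes that
	 * 'rep movsl' misses. When n is already a multiple of 4 this is
	 * a harmless re-copy.
	 */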

	/* Copy the data backward with 'rep movsl'. */
.p2align 4
.Lbackwards_movs:
	movl	(src), tmp0
	movl	dest, tmp1
	leal	-4(src, n), src
	leal	-4(dest, n), dest
	shrl	$2, n
	std
	rep	movsl
	movl	tmp0, (tmp1)
	cld
	jmp	.Ldone
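	/*
	 * Mirror image of .Lforward_movs: the pointers are set to the
	 * last dword of each buffer, 'std' makes 'rep movsl' walk
	 * downward, and the head dword saved in tmp0 covers the 1..3
	 * leading bytes the truncated count misses. The 'cld' restores
	 * the direction flag, which the kernel expects to be clear.
	 */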

	/* Prepare for a backward copy. */
.p2align 4
.Lbackwards_header:
	cmpl	$680, n
	jb	.Ltoo_small_backwards
	movl	src, tmp0
	xorl	dest, tmp0
	andl	$0xff, tmp0
	jz	.Lbackwards_movs

	/* Point src and dest at the tail of the buffers. */
.Ltoo_small_backwards:
	addl	n, src
	addl	n, dest
	subl	$0x10, n
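	/*
	 * src and dest now point one past the end of their buffers, and
	 * the loop below copies the 16 bytes just below the pointers in
	 * each iteration while walking them downward.
	 */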

	/* We gobble 16 bytes backward per loop iteration. */
.Lmove_16B_backwards_loop:
	subl	$0x10, n

	movl	-1*4(src), tmp0
	movl	-2*4(src), tmp1
	movl	tmp0, -1*4(dest)
	movl	tmp1, -2*4(dest)
	movl	-3*4(src), tmp0
	movl	-4*4(src), tmp1
	movl	tmp0, -3*4(dest)
	movl	tmp1, -4*4(dest)
	leal	-0x10(src), src
	leal	-0x10(dest), dest
	jae	.Lmove_16B_backwards_loop
	/* Move src and dest back to the head of the remaining bytes. */
	addl	$0x10, n
	subl	n, src
	subl	n, dest
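	/*
	 * src and dest have been moved back to the start of the 0..15
	 * uncopied head bytes, so the backward path can share the
	 * forward-copying .Lmove_16B tail below.
	 */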

	/* Move 8 to 15 bytes: */
.p2align 4
.Lmove_16B:
	cmpl	$8, n
	jb	.Lmove_8B
	movl	0*4(src), tmp0
	movl	1*4(src), tmp1
	movl	-2*4(src, n), tmp2
	movl	-1*4(src, n), src

	movl	tmp0, 0*4(dest)
	movl	tmp1, 1*4(dest)
	movl	tmp2, -2*4(dest, n)
	movl	src, -1*4(dest, n)
	jmp	.Ldone
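	/*
	 * Illustrative example (n=11): the head pair of dwords covers
	 * bytes 0..7 and the tail pair covers bytes 3..10, overlapping
	 * in the middle, so all n bytes are written. All four loads
	 * happen before any store (note that 'src' is reused as the
	 * fourth temporary), which keeps overlapping buffers safe. The
	 * 4..7 and 2..3 byte cases below use the same head+tail trick.
	 */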

	/* Move 4 to 7 bytes: */
.p2align 4
.Lmove_8B:
	cmpl	$4, n
	jb	.Lmove_4B
	movl	0*4(src), tmp0
	movl	-1*4(src, n), tmp1
	movl	tmp0, 0*4(dest)
	movl	tmp1, -1*4(dest, n)
	jmp	.Ldone

	/* Move 2 to 3 bytes: */
.p2align 4
.Lmove_4B:
	cmpl	$2, n
	jb	.Lmove_1B
	movw	0*2(src), tmp0w
	movw	-1*2(src, n), tmp1w
	movw	tmp0w, 0*2(dest)
	movw	tmp1w, -1*2(dest, n)
	jmp	.Ldone

	/* Move a single byte: */
.p2align 4
.Lmove_1B:
	cmpl	$1, n
	jb	.Ldone
	movb	(src), tmp3b
	movb	tmp3b, (dest)
.p2align 4
.Ldone:
	popl	dest_in	// restore 'dest_in' [eax] as the return value
	/* Restore all callee-saved registers: */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	RET
SYM_FUNC_END(memmove)
EXPORT_SYMBOL(memmove)