summaryrefslogtreecommitdiff
path: root/arch/x86/kernel/relocate_kernel_32.S
blob: c7c4b1917336d1d95b5b998b0b065a89513287cd (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/processor-flags.h>

/*
 * Must be relocatable PIC code callable as a C function, in particular
 * there must be a plain RET and not jump to return thunk.
 */

/*
 * PTR(x): byte offset of entry x in the page list passed in from C;
 * each entry is a 32-bit word, hence the shift by 2.
 */
#define PTR(x) (x << 2)

/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * jumping back
 */
#define DATA(offset)		(KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state, saved on entry and restored on the jump-back path */
#define ESP			DATA(0x0)	/* caller's stack pointer */
#define CR0			DATA(0x4)
#define CR3			DATA(0x8)
#define CR4			DATA(0xc)

/* other data needed to find our way back after the peer kernel returns */
#define CP_VA_CONTROL_PAGE	DATA(0x10)	/* virtual addr of control page */
#define CP_PA_PGD		DATA(0x14)	/* physical addr of kexec PGD */
#define CP_PA_SWAP_PAGE		DATA(0x18)	/* physical addr of swap page */
#define CP_PA_BACKUP_PAGES_MAP	DATA(0x1c)	/* indirection page list */
	.text
SYM_CODE_START_NOALIGN(relocate_kernel)
	/*
	 * Called from C (i386 cdecl).  After the five pushes below the
	 * incoming arguments sit at 20+N(%esp):
	 *   20+4   page_list (tagged indirection list head)
	 *   20+8   list of pages (array addressed via PTR())
	 *   20+12  start address of the new kernel
	 *   20+16  cpu_has_pae
	 *   20+20  preserve_context (non-zero => kexec jump-back)
	 */

	/* Save the CPU context, used for jumping back */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	pushf

	/* Stash minimal CPU state in the control page's DATA() area */
	movl	20+8(%esp), %ebp /* list of pages */
	movl	PTR(VA_CONTROL_PAGE)(%ebp), %edi
	movl	%esp, ESP(%edi)
	movl	%cr0, %eax
	movl	%eax, CR0(%edi)
	movl	%cr3, %eax
	movl	%eax, CR3(%edi)
	movl	%cr4, %eax
	movl	%eax, CR4(%edi)

	/* read the arguments and say goodbye to the stack */
	movl  20+4(%esp), %ebx /* page_list */
	movl  20+8(%esp), %ebp /* list of pages */
	movl  20+12(%esp), %edx /* start address */
	movl  20+16(%esp), %ecx /* cpu_has_pae */
	movl  20+20(%esp), %esi /* preserve_context */

	/* zero out flags, and disable interrupts */
	pushl $0
	popfl

	/*
	 * save some information for jumping back; stored at fixed DATA()
	 * offsets in the control page so the return path can find it
	 * without any valid stack or page tables of its own
	 */
	movl	PTR(VA_CONTROL_PAGE)(%ebp), %edi
	movl	%edi, CP_VA_CONTROL_PAGE(%edi)
	movl	PTR(PA_PGD)(%ebp), %eax
	movl	%eax, CP_PA_PGD(%edi)
	movl	PTR(PA_SWAP_PAGE)(%ebp), %eax
	movl	%eax, CP_PA_SWAP_PAGE(%edi)
	movl	%ebx, CP_PA_BACKUP_PAGES_MAP(%edi)

	/*
	 * get physical address of control page now
	 * this is impossible after page table switch
	 */
	movl	PTR(PA_CONTROL_PAGE)(%ebp), %edi

	/* switch to new set of page tables (identity maps the control page) */
	movl	PTR(PA_PGD)(%ebp), %eax
	movl	%eax, %cr3

	/* setup a new stack at the end of the physical control page */
	lea	PAGE_SIZE(%edi), %esp

	/*
	 * jump to identity mapped page: compute its physical address
	 * relative to our own and "return" to it.  Must be a plain RET,
	 * not a return thunk (see the note at the top of the file).
	 */
	movl    %edi, %eax
	addl    $(identity_mapped - relocate_kernel), %eax
	pushl   %eax
	ANNOTATE_UNRET_SAFE
	ret
	int3	/* speculation trap: never falls through */
SYM_CODE_END(relocate_kernel)

SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	/*
	 * Running from the identity-mapped control page.
	 * %edi = PA of control page, %edx = start address,
	 * %ecx = cpu_has_pae, %esi = preserve_context, %ebx = page_list.
	 */

	/* set return address to 0 if not preserving context */
	pushl	$0
	/* store the start address on the stack (consumed by RET below) */
	pushl   %edx

	/*
	 * Set cr0 to a known state:
	 *  - Paging disabled
	 *  - Alignment check disabled
	 *  - Write protect disabled
	 *  - No task switch
	 *  - Don't do FP software emulation.
	 *  - Protected mode enabled
	 */
	movl	%cr0, %eax
	andl	$~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
	orl	$(X86_CR0_PE), %eax
	movl	%eax, %cr0

	/* clear cr4 if applicable (%ecx = cpu_has_pae) */
	testl	%ecx, %ecx
	jz	1f
	/*
	 * Set cr4 to a known state:
	 * Setting everything to zero seems safe.
	 */
	xorl	%eax, %eax
	movl	%eax, %cr4

	/* jump-to-next-line after the cr4 write (legacy pipeline-flush idiom) */
	jmp 1f
1:

	/* Flush the TLB (needed?) */
	xorl	%eax, %eax
	movl	%eax, %cr3

	/* swap_pages(page_list, swap_page): move the new kernel into place */
	movl	CP_PA_SWAP_PAGE(%edi), %eax
	pushl	%eax
	pushl	%ebx
	call	swap_pages
	addl	$8, %esp

	/*
	 * To be certain of avoiding problems with self-modifying code
	 * I need to execute a serializing instruction here.
	 * So I flush the TLB, it's handy, and not processor dependent.
	 */
	xorl	%eax, %eax
	movl	%eax, %cr3

	/*
	 * set all of the registers to known values
	 * leave %esp alone
	 */

	testl	%esi, %esi	/* preserve_context? */
	jnz 1f
	/* no: zero everything and RET into the start address pushed above */
	xorl	%edi, %edi
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl    %ecx, %ecx
	xorl    %edx, %edx
	xorl    %esi, %esi
	xorl    %ebp, %ebp
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	/* preserve_context: call the peer kernel and expect to come back */
	popl	%edx				/* start address */
	movl	CP_PA_SWAP_PAGE(%edi), %esp
	addl	$PAGE_SIZE, %esp		/* temp stack at top of swap page */
2:
	ANNOTATE_RETPOLINE_SAFE
	call	*%edx

	/* get the re-entry point of the peer system */
	movl	0(%esp), %ebp
	/* call/pop trick: rediscover our physical load address (PIC) */
	call	1f
1:
	popl	%ebx
	subl	$(1b - relocate_kernel), %ebx	/* %ebx = PA of control page */
	movl	CP_VA_CONTROL_PAGE(%ebx), %edi
	lea	PAGE_SIZE(%ebx), %esp
	movl	CP_PA_SWAP_PAGE(%ebx), %eax
	movl	CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
	pushl	%eax
	pushl	%edx
	call	swap_pages			/* swap our pages back into place */
	addl	$8, %esp
	movl	CP_PA_PGD(%ebx), %eax
	movl	%eax, %cr3			/* back onto the kexec page tables */
	movl	%cr0, %eax
	orl	$X86_CR0_PG, %eax
	movl	%eax, %cr0			/* re-enable paging */
	lea	PAGE_SIZE(%edi), %esp		/* stack in virtual control page */
	movl	%edi, %eax
	addl	$(virtual_mapped - relocate_kernel), %eax
	pushl	%eax
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(identity_mapped)

SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	/*
	 * Back in (virtually addressed) control-page code on the jump-back
	 * path.  %edi = VA of control page, %ebp = re-entry point of the
	 * peer system.  Restore the CPU state saved by relocate_kernel,
	 * in the reverse order: cr4, cr3, cr0, then the original stack.
	 */
	movl	CR4(%edi), %eax
	movl	%eax, %cr4
	movl	CR3(%edi), %eax
	movl	%eax, %cr3
	movl	CR0(%edi), %eax
	movl	%eax, %cr0
	movl	ESP(%edi), %esp		/* caller's stack, as saved on entry */
	movl	%ebp, %eax		/* return value: peer re-entry point */

	/* undo relocate_kernel's entry pushes and return to the C caller */
	popf
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(virtual_mapped)

	/* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
	/*
	 * swap_pages(page_list, swap_page)
	 *   4(%esp) = page_list: tagged kexec indirection list; the low
	 *             bits of each word select destination (0x1),
	 *             indirection (0x2), done (0x4) or source (0x8)
	 *   8(%esp) = swap page: scratch page used so that source and
	 *             destination contents are EXCHANGED, not overwritten,
	 *             which makes the operation reversible for jump-back
	 */
	movl	8(%esp), %edx		/* %edx = swap page (constant) */
	movl	4(%esp), %ecx		/* %ecx = list head */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	movl	%ecx, %ebx		/* %ebx walks the indirection words */
	/* the head value is itself a tagged entry, so decode it first */
	jmp	1f

0:	/* top, read another word from the indirection page */
	movl	(%ebx), %ecx
	addl	$4, %ebx
1:
	testb	$0x1, %cl     /* is it a destination page */
	jz	2f
	movl	%ecx,	%edi
	andl	$0xfffff000, %edi	/* %edi = destination page */
	jmp     0b
2:
	testb	$0x2, %cl    /* is it an indirection page */
	jz	2f
	movl	%ecx,	%ebx
	andl	$0xfffff000, %ebx	/* continue walking the new page */
	jmp     0b
2:
	testb   $0x4, %cl    /* is it the done indicator */
	jz      2f
	jmp     3f
2:
	testb   $0x8, %cl    /* is it the source indicator */
	jz      0b	     /* Ignore it otherwise */
	movl    %ecx,   %esi /* For every source page do a copy */
	andl    $0xfffff000, %esi

	/*
	 * Three-way exchange through the swap page:
	 *   source -> swap, dest -> source, swap -> dest.
	 * rep movsl advances %esi/%edi past each page as it copies.
	 */
	movl	%edi, %eax	/* %eax = dest */
	movl	%esi, %ebp	/* %ebp = source */

	movl	%edx, %edi	/* source -> swap */
	movl    $1024, %ecx	/* 1024 dwords = one 4 KiB page */
	rep ; movsl

	movl	%ebp, %edi	/* dest -> source */
	movl	%eax, %esi
	movl	$1024, %ecx
	rep ; movsl

	movl	%eax, %edi	/* swap -> dest */
	movl	%edx, %esi
	movl	$1024, %ecx
	rep ; movsl
	/* %edi is now dest + PAGE_SIZE: consecutive sources fill
	   consecutive destination pages */

	lea	PAGE_SIZE(%ebp), %esi
	jmp     0b
3:
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(swap_pages)

	/*
	 * Total size of the relocation code above; presumably checked by
	 * the C side against KEXEC_CONTROL_CODE_MAX_SIZE so the DATA()
	 * area does not overlap the code — NOTE(review): confirm against
	 * machine_kexec_32.c.
	 */
	.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel