summary refs log tree commit diff
diff options
context:
space:
mode:
author    David Woodhouse <dwmw@amazon.co.uk>  2024-12-05 15:05:18 +0000
committer Ingo Molnar <mingo@kernel.org>       2024-12-06 10:42:00 +0100
commit 93e489ad7a4694bb2fe8110f5012f85bd3eee65a (patch)
tree   dcb0e5e5a2857b4d7c43596f9c80caf5603c3f21
parent b7155dfd4999211247cce40be2665c71235ab094 (diff)
x86/kexec: Clean up register usage in relocate_kernel()
The memory encryption flag is passed in %r8 because that's where the calling convention puts it. Instead of moving it to %r12 and then using %r8 for other things, just leave it in %r8 and use other registers instead. Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: Ingo Molnar <mingo@kernel.org> Cc: Baoquan He <bhe@redhat.com> Cc: Vivek Goyal <vgoyal@redhat.com> Cc: Dave Young <dyoung@redhat.com> Cc: Eric Biederman <ebiederm@xmission.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: "H. Peter Anvin" <hpa@zytor.com> Link: https://lore.kernel.org/r/20241205153343.3275139-13-dwmw2@infradead.org
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 739041c5bca3..8bc86a1e056a 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -79,24 +79,18 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
movq %cr4, %r13
movq %r13, saved_cr4(%rip)
- /* Save SME active flag */
- movq %r8, %r12
-
/* save indirection list for jumping back */
movq %rdi, pa_backup_pages_map(%rip)
/* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
movq %rcx, %r11
- /* Physical address of control page */
- movq %rsi, %r8
-
/* setup a new stack at the end of the physical control page */
- lea PAGE_SIZE(%r8), %rsp
+ lea PAGE_SIZE(%rsi), %rsp
/* jump to identity mapped page */
- addq $(identity_mapped - relocate_kernel), %r8
- pushq %r8
+ addq $(identity_mapped - relocate_kernel), %rsi
+ pushq %rsi
ANNOTATE_UNRET_SAFE
ret
int3
@@ -107,8 +101,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
/*
* %rdi indirection page
* %rdx start address
+ * %r8 host_mem_enc_active
+ * %r9 page table page
* %r11 preserve_context
- * %r12 host_mem_enc_active
* %r13 original CR4 when relocate_kernel() was invoked
*/
@@ -161,7 +156,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
* entries that will conflict with the now unencrypted memory
* used by kexec. Flush the caches before copying the kernel.
*/
- testq %r12, %r12
+ testq %r8, %r8
jz .Lsme_off
wbinvd
.Lsme_off: