Diffstat (limited to 'arch/arm64/kernel/machine_kexec.c'):

 arch/arm64/kernel/machine_kexec.c | 256 ++++++++++++++++--------------------
 1 file changed, 111 insertions(+), 145 deletions(-)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 0df8493624e0..b38aae5b488d 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
+#include <linux/reboot.h>
+#include <linux/set_memory.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
@@ -20,12 +22,8 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-
-#include "cpu-reset.h"
-
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
+#include <asm/sections.h>
+#include <asm/trans_pgd.h>
/**
* kexec_image_info - For debugging output.
@@ -34,23 +32,12 @@ extern const unsigned long arm64_relocate_new_kernel_size;
static void _kexec_image_info(const char *func, int line,
const struct kimage *kimage)
{
- unsigned long i;
-
- pr_debug("%s:%d:\n", func, line);
- pr_debug(" kexec kimage info:\n");
- pr_debug(" type: %d\n", kimage->type);
- pr_debug(" start: %lx\n", kimage->start);
- pr_debug(" head: %lx\n", kimage->head);
- pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
-
- for (i = 0; i < kimage->nr_segments; i++) {
- pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
- i,
- kimage->segment[i].mem,
- kimage->segment[i].mem + kimage->segment[i].memsz,
- kimage->segment[i].memsz,
- kimage->segment[i].memsz / PAGE_SIZE);
- }
+ kexec_dprintk("%s:%d:\n", func, line);
+ kexec_dprintk(" kexec kimage info:\n");
+ kexec_dprintk(" type: %d\n", kimage->type);
+ kexec_dprintk(" head: %lx\n", kimage->head);
+ kexec_dprintk(" kern_reloc: %pa\n", &kimage->arch.kern_reloc);
+ kexec_dprintk(" el2_vectors: %pa\n", &kimage->arch.el2_vectors);
}
void machine_kexec_cleanup(struct kimage *kimage)
@@ -67,8 +54,6 @@ void machine_kexec_cleanup(struct kimage *kimage)
*/
int machine_kexec_prepare(struct kimage *kimage)
{
- kexec_image_info(kimage);
-
if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
return -EBUSY;
@@ -78,43 +63,6 @@ int machine_kexec_prepare(struct kimage *kimage)
}
/**
- * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
- */
-static void kexec_list_flush(struct kimage *kimage)
-{
- kimage_entry_t *entry;
-
- for (entry = &kimage->head; ; entry++) {
- unsigned int flag;
- void *addr;
-
- /* flush the list entries. */
- __flush_dcache_area(entry, sizeof(kimage_entry_t));
-
- flag = *entry & IND_FLAGS;
- if (flag == IND_DONE)
- break;
-
- addr = phys_to_virt(*entry & PAGE_MASK);
-
- switch (flag) {
- case IND_INDIRECTION:
- /* Set entry point just before the new list page. */
- entry = (kimage_entry_t *)addr - 1;
- break;
- case IND_SOURCE:
- /* flush the source pages. */
- __flush_dcache_area(addr, PAGE_SIZE);
- break;
- case IND_DESTINATION:
- break;
- default:
- BUG();
- }
- }
-}
-
-/**
* kexec_segment_flush - Helper to flush the kimage segments to PoC.
*/
static void kexec_segment_flush(const struct kimage *kimage)
@@ -131,11 +79,84 @@ static void kexec_segment_flush(const struct kimage *kimage)
kimage->segment[i].memsz,
kimage->segment[i].memsz / PAGE_SIZE);
- __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
- kimage->segment[i].memsz);
+ dcache_clean_inval_poc(
+ (unsigned long)phys_to_virt(kimage->segment[i].mem),
+ (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+ kimage->segment[i].memsz);
}
}
+/* Allocates pages for kexec page table */
+static void *kexec_page_alloc(void *arg)
+{
+ struct kimage *kimage = arg;
+ struct page *page = kimage_alloc_control_pages(kimage, 0);
+ void *vaddr = NULL;
+
+ if (!page)
+ return NULL;
+
+ vaddr = page_address(page);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ return vaddr;
+}
+
+int machine_kexec_post_load(struct kimage *kimage)
+{
+ int rc;
+ pgd_t *trans_pgd;
+ void *reloc_code = page_to_virt(kimage->control_code_page);
+ long reloc_size;
+ struct trans_pgd_info info = {
+ .trans_alloc_page = kexec_page_alloc,
+ .trans_alloc_arg = kimage,
+ };
+
+	/* If the image is already in place, relocation is not used; only flush the next kernel. */
+ if (kimage->head & IND_DONE) {
+ kexec_segment_flush(kimage);
+ kexec_image_info(kimage);
+ return 0;
+ }
+
+ kimage->arch.el2_vectors = 0;
+ if (is_hyp_nvhe()) {
+ rc = trans_pgd_copy_el2_vectors(&info,
+ &kimage->arch.el2_vectors);
+ if (rc)
+ return rc;
+ }
+
+ /* Create a copy of the linear map */
+ trans_pgd = kexec_page_alloc(kimage);
+ if (!trans_pgd)
+ return -ENOMEM;
+ rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END);
+ if (rc)
+ return rc;
+ kimage->arch.ttbr1 = __pa(trans_pgd);
+ kimage->arch.zero_page = __pa_symbol(empty_zero_page);
+
+ reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
+ memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
+ kimage->arch.kern_reloc = __pa(reloc_code);
+ rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0,
+ &kimage->arch.t0sz, reloc_code);
+ if (rc)
+ return rc;
+ kimage->arch.phys_offset = virt_to_phys(kimage) - (long)kimage;
+
+ /* Flush the reloc_code in preparation for its execution. */
+ dcache_clean_inval_poc((unsigned long)reloc_code,
+ (unsigned long)reloc_code + reloc_size);
+ icache_inval_pou((uintptr_t)reloc_code,
+ (uintptr_t)reloc_code + reloc_size);
+ kexec_image_info(kimage);
+
+ return 0;
+}
+
/**
* machine_kexec - Do the kexec reboot.
*
@@ -143,8 +164,6 @@ static void kexec_segment_flush(const struct kimage *kimage)
*/
void machine_kexec(struct kimage *kimage)
{
- phys_addr_t reboot_code_buffer_phys;
- void *reboot_code_buffer;
bool in_kexec_crash = (kimage == kexec_crash_image);
bool stuck_cpus = cpus_are_stuck_in_kernel();
@@ -155,71 +174,35 @@ void machine_kexec(struct kimage *kimage)
WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
"Some CPUs may be stale, kdump will be unreliable.\n");
- reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
- reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
-
- kexec_image_info(kimage);
-
- pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__,
- kimage->control_code_page);
- pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__,
- &reboot_code_buffer_phys);
- pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__,
- reboot_code_buffer);
- pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__,
- arm64_relocate_new_kernel);
- pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
- __func__, __LINE__, arm64_relocate_new_kernel_size,
- arm64_relocate_new_kernel_size);
-
- /*
- * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
- * after the kernel is shut down.
- */
- memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
- arm64_relocate_new_kernel_size);
-
- /* Flush the reboot_code_buffer in preparation for its execution. */
- __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
-
- /*
- * Although we've killed off the secondary CPUs, we don't update
- * the online mask if we're handling a crash kernel and consequently
- * need to avoid flush_icache_range(), which will attempt to IPI
- * the offline CPUs. Therefore, we must use the __* variant here.
- */
- __flush_icache_range((uintptr_t)reboot_code_buffer,
- arm64_relocate_new_kernel_size);
-
- /* Flush the kimage list and its buffers. */
- kexec_list_flush(kimage);
-
- /* Flush the new image if already in place. */
- if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
- kexec_segment_flush(kimage);
-
pr_info("Bye!\n");
local_daif_mask();
/*
- * cpu_soft_restart will shutdown the MMU, disable data caches, then
- * transfer control to the reboot_code_buffer which contains a copy of
- * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
- * uses physical addressing to relocate the new image to its final
- * position and transfers control to the image entry point when the
- * relocation is complete.
+	 * Both restart and kernel_reloc shut down the MMU and disable data
+	 * caches; restart then starts the new kernel or purgatory directly,
+	 * while kernel_reloc holds the body of arm64_relocate_new_kernel.
* In kexec case, kimage->start points to purgatory assuming that
* kernel entry and dtb address are embedded in purgatory by
* userspace (kexec-tools).
* In kexec_file case, the kernel starts directly without purgatory.
*/
- cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
-#ifdef CONFIG_KEXEC_FILE
- kimage->arch.dtb_mem);
-#else
- 0);
-#endif
+ if (kimage->head & IND_DONE) {
+ typeof(cpu_soft_restart) *restart;
+
+ cpu_install_idmap();
+ restart = (void *)__pa_symbol(cpu_soft_restart);
+ restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
+ 0, 0);
+ } else {
+ void (*kernel_reloc)(struct kimage *kimage);
+
+ if (is_hyp_nvhe())
+ __hyp_set_vectors(kimage->arch.el2_vectors);
+ cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);
+ kernel_reloc = (void *)kimage->arch.kern_reloc;
+ kernel_reloc(kimage);
+ }
BUG(); /* Should never get here. */
}
@@ -272,28 +255,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
-void arch_kexec_protect_crashkres(void)
-{
- int i;
-
- kexec_segment_flush(kexec_crash_image);
-
- for (i = 0; i < kexec_crash_image->nr_segments; i++)
- set_memory_valid(
- __phys_to_virt(kexec_crash_image->segment[i].mem),
- kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
-}
-
-void arch_kexec_unprotect_crashkres(void)
-{
- int i;
-
- for (i = 0; i < kexec_crash_image->nr_segments; i++)
- set_memory_valid(
- __phys_to_virt(kexec_crash_image->segment[i].mem),
- kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
-}
-
#ifdef CONFIG_HIBERNATION
/*
* To preserve the crash dump kernel image, the relevant memory segments
@@ -335,8 +296,13 @@ bool crash_is_nosave(unsigned long pfn)
/* in reserved memory? */
addr = __pfn_to_phys(pfn);
- if ((addr < crashk_res.start) || (crashk_res.end < addr))
- return false;
+ if ((addr < crashk_res.start) || (crashk_res.end < addr)) {
+ if (!crashk_low_res.end)
+ return false;
+
+ if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr))
+ return false;
+ }
if (!kexec_crash_image)
return true;