// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro
 *
 * Most code is derived from arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

/*
 * Header names were lost in extraction; the list below is reconstructed from
 * the symbols this file actually uses (kexec_buf, memblock iteration, libfdt,
 * kmalloc/vfree, etc.).
 */
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};

int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}

static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 1; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (!ret)
		ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to find
 * valid locations, this function will undo changes to the image and return
 * non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *dtb = NULL;
	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* do not allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
						+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);
	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}
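
/*
 * Usage sketch (not part of this file): load_other_segments() is expected to
 * be called by the arch image loader after the kernel segment itself has been
 * added, so kernel_load_addr/kernel_size describe the already-placed kernel.
 * A caller roughly along the lines of arch/arm64/kernel/kexec_image.c would
 * look like this (variable names here are illustrative):
 *
 *	kernel_segment = &image->segment[kernel_segment_number];
 *	ret = load_other_segments(image, kernel_segment->mem,
 *				  kernel_segment->memsz, initrd,
 *				  initrd_len, cmdline);
 *
 * On failure the function restores image->nr_segments, so the caller can
 * simply propagate the error.
 */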