author		Steven Chen <chenste@linux.microsoft.com>	2025-04-21 15:25:09 -0700
committer	Mimi Zohar <zohar@linux.ibm.com>		2025-04-29 15:54:53 -0400
commit		0091d9241ea24c5275be4a3e5a032862fd9de9ec
tree		c9bc30f0ff6c9aeeca8ea1479e0c077bfdd5c239 /kernel/kexec_core.c
parent		c95e1acb6d7f00efab73e41b31e0560751e3f469
kexec: define functions to map and unmap segments
Implement kimage_map_segment() to enable IMA to map the measurement log
list to the kimage structure during the kexec 'load' stage. This function
gathers the source pages within the specified address range and maps them
into a contiguous virtual address range.

This is in preparation for later use by IMA.

Implement kimage_unmap_segment() for unmapping segments using vunmap().
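[Editor's note: for context, a minimal sketch of how a kexec 'load'-stage
caller such as IMA might use this pair. example_copy_log(), its parameters,
and the error handling are hypothetical illustrations, not part of this
patch; only kimage_map_segment() and kimage_unmap_segment() are real.]

#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/string.h>

/*
 * Hypothetical caller: map a kexec segment, copy a log into it,
 * and tear the mapping down again.  The segment's load address and
 * size would come from the kexec_segment chosen for the log.
 */
static int example_copy_log(struct kimage *image, unsigned long addr,
			    unsigned long size, const void *log, size_t len)
{
	void *buf;

	if (len > size)
		return -EINVAL;

	/* Contiguous kernel mapping of the segment's source pages. */
	buf = kimage_map_segment(image, addr, size);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, log, len);

	/* vunmap()s the range; the underlying pages stay in place. */
	kimage_unmap_segment(buf);
	return 0;
}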
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Young <dyoung@redhat.com>
Co-developed-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
Acked-by: Baoquan He <bhe@redhat.com>
Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
Diffstat (limited to 'kernel/kexec_core.c')
-rw-r--r--	kernel/kexec_core.c	54
1 file changed, 54 insertions, 0 deletions
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 3e62b944c883..9c59fa480b0b 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -877,6 +877,60 @@ int kimage_load_segment(struct kimage *image,
 	return result;
 }
 
+void *kimage_map_segment(struct kimage *image,
+			 unsigned long addr, unsigned long size)
+{
+	unsigned long src_page_addr, dest_page_addr = 0;
+	unsigned long eaddr = addr + size;
+	kimage_entry_t *ptr, entry;
+	struct page **src_pages;
+	unsigned int npages;
+	void *vaddr = NULL;
+	int i;
+
+	/*
+	 * Collect the source pages and map them in a contiguous VA range.
+	 */
+	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
+	if (!src_pages) {
+		pr_err("Could not allocate ima pages array.\n");
+		return NULL;
+	}
+
+	i = 0;
+	for_each_kimage_entry(image, ptr, entry) {
+		if (entry & IND_DESTINATION) {
+			dest_page_addr = entry & PAGE_MASK;
+		} else if (entry & IND_SOURCE) {
+			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+				src_page_addr = entry & PAGE_MASK;
+				src_pages[i++] =
+					virt_to_page(__va(src_page_addr));
+				if (i == npages)
+					break;
+				dest_page_addr += PAGE_SIZE;
+			}
+		}
+	}
+
+	/* Sanity check. */
+	WARN_ON(i < npages);
+
+	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+	kfree(src_pages);
+
+	if (!vaddr)
+		pr_err("Could not map ima buffer.\n");
+
+	return vaddr;
+}
+
+void kimage_unmap_segment(void *segment_buffer)
+{
+	vunmap(segment_buffer);
+}
+
 struct kexec_load_limit {
 	/* Mutex protects the limit count. */
 	struct mutex mutex;
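[Editor's note: one detail worth spelling out in the hunk above: the
segment's address range need not be page-aligned, so the page count is
computed as PFN_UP(eaddr) - PFN_DOWN(addr), rounding the end up and the
start down.  A standalone sketch of that arithmetic; the macro bodies
mirror the kernel's for a 4 KiB page, and range_npages() is our
illustrative name, not a kernel function.]

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* Count the pages touched by an arbitrary [addr, addr + size) range. */
static unsigned long range_npages(unsigned long addr, unsigned long size)
{
	return PFN_UP(addr + size) - PFN_DOWN(addr);
}

int main(void)
{
	/* 8 KiB starting 0x400 bytes into a page spans three pages. */
	printf("%lu\n", range_npages(0x10000400UL, 0x2000UL));	/* prints 3 */
	return 0;
}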