Diffstat (limited to 'mm/kmsan/shadow.c')
-rw-r--r--   mm/kmsan/shadow.c   62
1 file changed, 35 insertions(+), 27 deletions(-)
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index a787c04e9583..e7f554a31bb4 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -123,14 +123,12 @@ return_dummy:
  */
 void *kmsan_get_metadata(void *address, bool is_origin)
 {
-	u64 addr = (u64)address, pad, off;
+	u64 addr = (u64)address, off;
 	struct page *page;
 	void *ret;
 
-	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
-		pad = addr % KMSAN_ORIGIN_SIZE;
-		addr -= pad;
-	}
+	if (is_origin)
+		addr = ALIGN_DOWN(addr, KMSAN_ORIGIN_SIZE);
 	address = (void *)addr;
 	if (kmsan_internal_is_vmalloc_addr(address) ||
 	    kmsan_internal_is_module_addr(address))
@@ -145,7 +143,7 @@ void *kmsan_get_metadata(void *address, bool is_origin)
 		return NULL;
 	if (!page_has_metadata(page))
 		return NULL;
-	off = addr % PAGE_SIZE;
+	off = offset_in_page(addr);
 
 	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
 }
@@ -209,39 +207,39 @@ void kmsan_free_page(struct page *page, unsigned int order)
 	if (!kmsan_enabled || kmsan_in_runtime())
 		return;
 	kmsan_enter_runtime();
-	kmsan_internal_poison_memory(page_address(page),
-				     PAGE_SIZE << compound_order(page),
-				     GFP_KERNEL,
+	kmsan_internal_poison_memory(page_address(page), page_size(page),
+				     GFP_KERNEL & ~(__GFP_RECLAIM),
 				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
 	kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
 	struct page **s_pages, **o_pages;
-	int nr, mapped;
+	int nr, mapped, err = 0;
 
 	if (!kmsan_enabled)
-		return;
+		return 0;
 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
 	if (!shadow_start)
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
-	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
-	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-	if (!s_pages || !o_pages)
+	s_pages = kcalloc(nr, sizeof(*s_pages), gfp_mask);
+	o_pages = kcalloc(nr, sizeof(*o_pages), gfp_mask);
+	if (!s_pages || !o_pages) {
+		err = -ENOMEM;
 		goto ret;
+	}
 	for (int i = 0; i < nr; i++) {
 		s_pages[i] = shadow_page_for(pages[i]);
 		o_pages[i] = origin_page_for(pages[i]);
 	}
-	prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
 	prot = PAGE_KERNEL;
 
 	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
@@ -249,11 +247,19 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 	kmsan_enter_runtime();
 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
 					    s_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	kmsan_leave_runtime();
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
+	kmsan_enter_runtime();
 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
 					    o_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
 	kmsan_leave_runtime();
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	flush_tlb_kernel_range(shadow_start, shadow_end);
 	flush_tlb_kernel_range(origin_start, origin_end);
 	flush_cache_vmap(shadow_start, shadow_end);
@@ -262,6 +268,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
 	kfree(s_pages);
 	kfree(o_pages);
+	return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
@@ -272,16 +279,17 @@ void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
 	struct page *page;
 	u64 size;
 
-	start = (void *)ALIGN_DOWN((u64)start, PAGE_SIZE);
-	size = ALIGN((u64)end - (u64)start, PAGE_SIZE);
-	shadow = memblock_alloc(size, PAGE_SIZE);
-	origin = memblock_alloc(size, PAGE_SIZE);
+	start = (void *)PAGE_ALIGN_DOWN((u64)start);
+	size = PAGE_ALIGN((u64)end - (u64)start);
+	shadow = memblock_alloc_or_panic(size, PAGE_SIZE);
+	origin = memblock_alloc_or_panic(size, PAGE_SIZE);
+
 	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
 		page = virt_to_page_or_null((char *)start + addr);
-		shadow_p = virt_to_page_or_null((char *)shadow + addr);
+		shadow_p = virt_to_page((char *)shadow + addr);
 		set_no_shadow_origin_page(shadow_p);
 		shadow_page_for(page) = shadow_p;
-		origin_p = virt_to_page_or_null((char *)origin + addr);
+		origin_p = virt_to_page((char *)origin + addr);
 		set_no_shadow_origin_page(origin_p);
 		origin_page_for(page) = origin_p;
 	}
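
With the hunks above, kmsan_vmap_pages_range_noflush() now returns int and takes a gfp_mask, so callers are expected to propagate metadata-mapping failures instead of relying on the old void/KMSAN_WARN_ON() behaviour. A minimal caller-side sketch, assuming a wrapper shaped like vmap_pages_range_noflush() in mm/vmalloc.c; the wrapper's gfp_t parameter here is an illustrative assumption, not something taken from this diff:

/* Sketch only: abort the vmap if KMSAN metadata could not be mapped. */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
			     pgprot_t prot, struct page **pages,
			     unsigned int page_shift, gfp_t gfp_mask)
{
	int ret;

	/* Map KMSAN shadow/origin for the range first; an error aborts the vmap. */
	ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
					     page_shift, gfp_mask);
	if (ret)
		return ret;

	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}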
