Diffstat (limited to 'mm/kasan/shadow.c')
-rw-r--r--  mm/kasan/shadow.c  186
1 file changed, 120 insertions, 66 deletions
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index dd772f9d0f08..29a751a8a08d 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -125,20 +125,16 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
 	void *shadow_start, *shadow_end;
 
-	if (!kasan_arch_is_ready())
+	if (!kasan_enabled())
 		return;
 
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
-	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
+	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
 	 * addresses to this function.
 	 */
 	addr = kasan_reset_tag(addr);
 
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	if (WARN_ON(size & KASAN_GRANULE_MASK))
@@ -149,12 +145,12 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 
 	__memset(shadow_start, value, shadow_end - shadow_start);
 }
-EXPORT_SYMBOL(kasan_poison);
+EXPORT_SYMBOL_GPL(kasan_poison);
 
 #ifdef CONFIG_KASAN_GENERIC
 void kasan_poison_last_granule(const void *addr, size_t size)
 {
-	if (!kasan_arch_is_ready())
+	if (!kasan_enabled())
 		return;
 
 	if (size & KASAN_GRANULE_MASK) {
@@ -170,19 +166,11 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
 
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
-	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
 	addr = kasan_reset_tag(addr);
 
-	/*
-	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
-	 * that calls to ksize(), where size is not a multiple of machine-word
-	 * size, would otherwise poison the invalid portion of the word.
-	 */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 
@@ -211,19 +199,12 @@ static bool shadow_mapped(unsigned long addr)
 	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud))
 		return false;
-
-	/*
-	 * We can't use pud_large() or pud_huge(), the first one is
-	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
-	 * pud_bad(), if pud is bad then it's bad because it's huge.
-	 */
-	if (pud_bad(*pud))
+	if (pud_leaf(*pud))
 		return true;
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return false;
-
-	if (pmd_bad(*pmd))
+	if (pmd_leaf(*pmd))
 		return true;
 	pte = pte_offset_kernel(pmd, addr);
 	return !pte_none(ptep_get(pte));
@@ -311,41 +292,114 @@ void __init __weak kasan_populate_early_vm_area_shadow(void *start,
 {
 }
 
+struct vmalloc_populate_data {
+	unsigned long start;
+	struct page **pages;
+};
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
-				      void *unused)
+				      void *_data)
 {
-	unsigned long page;
+	struct vmalloc_populate_data *data = _data;
+	struct page *page;
 	pte_t pte;
+	int index;
 
-	if (likely(!pte_none(ptep_get(ptep))))
-		return 0;
+	arch_leave_lazy_mmu_mode();
 
-	page = __get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
-
-	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
-	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+	index = PFN_DOWN(addr - data->start);
+	page = data->pages[index];
+	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
+	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 
 	spin_lock(&init_mm.page_table_lock);
 	if (likely(pte_none(ptep_get(ptep)))) {
 		set_pte_at(&init_mm, addr, ptep, pte);
-		page = 0;
+		data->pages[index] = NULL;
 	}
 	spin_unlock(&init_mm.page_table_lock);
-	if (page)
-		free_page(page);
+
+	arch_enter_lazy_mmu_mode();
+
 	return 0;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+static void ___free_pages_bulk(struct page **pages, int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pages[i]) {
+			__free_pages(pages[i], 0);
+			pages[i] = NULL;
+		}
+	}
+}
+
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
+{
+	unsigned long nr_populated, nr_total = nr_pages;
+	struct page **page_array = pages;
+
+	while (nr_pages) {
+		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
+		if (!nr_populated) {
+			___free_pages_bulk(page_array, nr_total - nr_pages);
+			return -ENOMEM;
+		}
+		pages += nr_populated;
+		nr_pages -= nr_populated;
+	}
+
+	return 0;
+}
+
+static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
+{
+	unsigned long nr_pages, nr_total = PFN_UP(end - start);
+	struct vmalloc_populate_data data;
+	unsigned int flags;
+	int ret = 0;
+
+	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
+	if (!data.pages)
+		return -ENOMEM;
+
+	while (nr_total) {
+		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
+		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
+		if (ret)
+			break;
+
+		data.start = start;
+
+		/*
+		 * page tables allocations ignore external gfp mask, enforce it
+		 * by the scope API
+		 */
+		flags = memalloc_apply_gfp_scope(gfp_mask);
+		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
+					  kasan_populate_vmalloc_pte, &data);
+		memalloc_restore_scope(flags);
+
+		___free_pages_bulk(data.pages, nr_pages);
+		if (ret)
+			break;
+
+		start += nr_pages * PAGE_SIZE;
+		nr_total -= nr_pages;
+	}
+
+	free_page((unsigned long)data.pages);
+
+	return ret;
+}
+
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
-	if (!kasan_arch_is_ready())
-		return 0;
-
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
 
@@ -367,9 +421,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = apply_to_page_range(&init_mm, shadow_start,
-				  shadow_end - shadow_start,
-				  kasan_populate_vmalloc_pte, NULL);
+	ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
 
@@ -416,18 +468,23 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 					void *unused)
 {
-	unsigned long page;
+	pte_t pte;
+	int none;
 
-	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);
+	arch_leave_lazy_mmu_mode();
 
 	spin_lock(&init_mm.page_table_lock);
-
-	if (likely(!pte_none(ptep_get(ptep)))) {
+	pte = ptep_get(ptep);
+	none = pte_none(pte);
+	if (likely(!none))
 		pte_clear(&init_mm, addr, ptep);
-		free_page(page);
-	}
 	spin_unlock(&init_mm.page_table_lock);
 
+	if (likely(!none))
+		__free_page(pfn_to_page(pte_pfn(pte)));
+
+	arch_enter_lazy_mmu_mode();
+
 	return 0;
 }
 
@@ -506,17 +563,15 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
  * pages entirely covered by the free region, we will not run in to any
  * trouble - any simultaneous allocations will be for disjoint regions.
  */
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
-			   unsigned long free_region_end)
+			   unsigned long free_region_end,
+			   unsigned long flags)
 {
 	void *shadow_start, *shadow_end;
 	unsigned long region_start, region_end;
 	unsigned long size;
 
-	if (!kasan_arch_is_ready())
-		return;
-
 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
@@ -541,12 +596,17 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
 			return;
 		}
-		apply_to_existing_page_range(&init_mm,
+
+
+		if (flags & KASAN_VMALLOC_PAGE_RANGE)
+			apply_to_existing_page_range(&init_mm,
 					     (unsigned long)shadow_start,
 					     size, kasan_depopulate_vmalloc_pte,
 					     NULL);
-		flush_tlb_kernel_range((unsigned long)shadow_start,
-				       (unsigned long)shadow_end);
+
+		if (flags & KASAN_VMALLOC_TLB_FLUSH)
+			flush_tlb_kernel_range((unsigned long)shadow_start,
+					       (unsigned long)shadow_end);
 	}
 }
 
@@ -560,9 +620,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */
 
-	if (!kasan_arch_is_ready())
-		return (void *)start;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return (void *)start;
 
@@ -585,9 +642,6 @@
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
-	if (!kasan_arch_is_ready())
-		return;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return;
 
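Illustrative note (not part of the diff): the new __kasan_populate_vmalloc_do() populates the shadow in batches. It grabs up to a page's worth of struct page pointers, fills them through ___alloc_pages_bulk(), and only then walks the range with apply_to_page_range(). ___alloc_pages_bulk() keeps calling alloc_pages_bulk() until the whole request is satisfied, and releases everything already obtained if a round makes no progress. The standalone sketch below mirrors that fill-or-roll-back pattern with plain malloc() standing in for the page allocator; alloc_bulk(), free_bulk(), and the sizes are invented for the example, and a single allocation per iteration stands in for a bulk call.

/*
 * Standalone sketch (illustration only) of the fill-or-roll-back batch
 * allocation pattern used by ___alloc_pages_bulk()/___free_pages_bulk()
 * in the diff above.
 */
#include <stdio.h>
#include <stdlib.h>

/* Free every populated slot and clear it, like ___free_pages_bulk(). */
static void free_bulk(void **slots, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		free(slots[i]);
		slots[i] = NULL;
	}
}

/* Fill nr slots; on failure release what was already obtained. */
static int alloc_bulk(void **slots, int nr, size_t size)
{
	int done = 0;

	while (done < nr) {
		void *p = malloc(size);

		if (!p) {
			free_bulk(slots, done);
			return -1;	/* caller sees all-or-nothing */
		}
		slots[done++] = p;
	}
	return 0;
}

int main(void)
{
	void *slots[8] = { NULL };

	if (alloc_bulk(slots, 8, 4096) == 0) {
		printf("allocated all %d buffers\n", 8);
		free_bulk(slots, 8);
	}
	return 0;
}

The reworked kasan_depopulate_vmalloc_pte() follows a related discipline: the pte is read and cleared under init_mm.page_table_lock, while the backing page is freed only after the lock is dropped, so no page-allocator work runs inside the spinlock.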
