Diffstat (limited to 'mm/kasan/init.c')
-rw-r--r--  mm/kasan/init.c  88
1 file changed, 45 insertions, 43 deletions
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index fe6be0be1f76..f084e7a5df1e 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -1,14 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * This file contains some kasan initialization code.
+ * This file contains KASAN shadow initialization code.
  *
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #include <linux/memblock.h>
@@ -18,9 +13,9 @@
 #include <linux/mm.h>
 #include <linux/pfn.h>
 #include <linux/slab.h>
+#include <linux/pgalloc.h>
 
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 
 #include "kasan.h"
 
@@ -46,7 +41,7 @@ static inline bool kasan_p4d_table(pgd_t pgd)
 }
 #endif
 #if CONFIG_PGTABLE_LEVELS > 3
-pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
+pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
 static inline bool kasan_pud_table(p4d_t p4d)
 {
         return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
@@ -58,7 +53,7 @@ static inline bool kasan_pud_table(p4d_t p4d)
 }
 #endif
 #if CONFIG_PGTABLE_LEVELS > 2
-pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
+pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
 static inline bool kasan_pmd_table(pud_t pud)
 {
         return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
@@ -69,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
         return false;
 }
 #endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+        __page_aligned_bss;
 
 static inline bool kasan_pte_table(pmd_t pmd)
 {
@@ -130,8 +126,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
 
                         if (slab_is_available())
                                 p = pte_alloc_one_kernel(&init_mm);
-                        else
+                        else {
                                 p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                                kernel_pte_init(p);
+                        }
 
                         if (!p)
                                 return -ENOMEM;
@@ -170,8 +168,9 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
                                 if (!p)
                                         return -ENOMEM;
                         } else {
-                                pud_populate(&init_mm, pud,
-                                        early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                                p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                                pmd_init(p);
+                                pud_populate(&init_mm, pud, p);
                         }
                 }
                 zero_pmd_populate(pud, addr, next);
@@ -192,7 +191,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
                         pud_t *pud;
                         pmd_t *pmd;
 
-                        p4d_populate(&init_mm, p4d,
+                        p4d_populate_kernel(addr, p4d,
                                         lm_alias(kasan_early_shadow_pud));
                         pud = pud_offset(p4d, addr);
                         pud_populate(&init_mm, pud,
@@ -211,8 +210,9 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
                                 if (!p)
                                         return -ENOMEM;
                         } else {
-                                p4d_populate(&init_mm, p4d,
-                                        early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                                p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                                pud_init(p);
+                                p4d_populate_kernel(addr, p4d, p);
                         }
                 }
                 zero_pud_populate(p4d, addr, next);
@@ -224,8 +224,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 /**
  * kasan_populate_early_shadow - populate shadow memory region with
  *                               kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
  */
 int __ref kasan_populate_early_shadow(const void *shadow_start,
                                         const void *shadow_end)
@@ -251,10 +251,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
                          * puds,pmds, so pgd_populate(), pud_populate()
                          * is noops.
                          */
-                        pgd_populate(&init_mm, pgd,
+                        pgd_populate_kernel(addr, pgd,
                                         lm_alias(kasan_early_shadow_p4d));
                         p4d = p4d_offset(pgd, addr);
-                        p4d_populate(&init_mm, p4d,
+                        p4d_populate_kernel(addr, p4d,
                                         lm_alias(kasan_early_shadow_pud));
                         pud = pud_offset(p4d, addr);
                         pud_populate(&init_mm, pud,
@@ -266,14 +266,12 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
                 }
 
                 if (pgd_none(*pgd)) {
-                        p4d_t *p;
 
                         if (slab_is_available()) {
-                                p = p4d_alloc(&init_mm, pgd, addr);
-                                if (!p)
+                                if (!p4d_alloc(&init_mm, pgd, addr))
                                         return -ENOMEM;
                         } else {
-                                pgd_populate(&init_mm, pgd,
+                                pgd_populate_kernel(addr, pgd,
                                         early_alloc(PAGE_SIZE, NUMA_NO_NODE));
                         }
                 }
@@ -290,7 +288,7 @@ static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
 
         for (i = 0; i < PTRS_PER_PTE; i++) {
                 pte = pte_start + i;
-                if (!pte_none(*pte))
+                if (!pte_none(ptep_get(pte)))
                         return;
         }
 
@@ -347,16 +345,19 @@ static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
                                 unsigned long end)
 {
         unsigned long next;
+        pte_t ptent;
 
         for (; addr < end; addr = next, pte++) {
                 next = (addr + PAGE_SIZE) & PAGE_MASK;
                 if (next > end)
                         next = end;
 
-                if (!pte_present(*pte))
+                ptent = ptep_get(pte);
+
+                if (!pte_present(ptent))
                         continue;
 
-                if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
+                if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))
                         continue;
                 pte_clear(&init_mm, addr, pte);
         }
@@ -377,9 +378,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
 
                 if (kasan_pte_table(*pmd)) {
                         if (IS_ALIGNED(addr, PMD_SIZE) &&
-                            IS_ALIGNED(next, PMD_SIZE))
+                            IS_ALIGNED(next, PMD_SIZE)) {
                                 pmd_clear(pmd);
-                        continue;
+                                continue;
+                        }
                 }
                 pte = pte_offset_kernel(pmd, addr);
                 kasan_remove_pte_table(pte, addr, next);
@@ -402,9 +404,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
 
                 if (kasan_pmd_table(*pud)) {
                         if (IS_ALIGNED(addr, PUD_SIZE) &&
-                            IS_ALIGNED(next, PUD_SIZE))
+                            IS_ALIGNED(next, PUD_SIZE)) {
                                 pud_clear(pud);
-                        continue;
+                                continue;
+                        }
                 }
                 pmd = pmd_offset(pud, addr);
                 pmd_base = pmd_offset(pud, 0);
@@ -428,9 +431,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
 
                 if (kasan_pud_table(*p4d)) {
                         if (IS_ALIGNED(addr, P4D_SIZE) &&
-                            IS_ALIGNED(next, P4D_SIZE))
+                            IS_ALIGNED(next, P4D_SIZE)) {
                                 p4d_clear(p4d);
-                        continue;
+                                continue;
+                        }
                 }
                 pud = pud_offset(p4d, addr);
                 kasan_remove_pud_table(pud, addr, next);
@@ -446,9 +450,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
         addr = (unsigned long)kasan_mem_to_shadow(start);
         end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
-        if (WARN_ON((unsigned long)start %
-                        (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-            WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+        if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
+            WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
                 return;
 
         for (; addr < end; addr = next) {
@@ -462,9 +465,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
 
                 if (kasan_p4d_table(*pgd)) {
                         if (IS_ALIGNED(addr, PGDIR_SIZE) &&
-                            IS_ALIGNED(next, PGDIR_SIZE))
+                            IS_ALIGNED(next, PGDIR_SIZE)) {
                                 pgd_clear(pgd);
-                        continue;
+                                continue;
+                        }
                 }
 
                 p4d = p4d_offset(pgd, addr);
@@ -481,14 +485,12 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
         shadow_start = kasan_mem_to_shadow(start);
         shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
-        if (WARN_ON((unsigned long)start %
-                        (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-            WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+        if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
+            WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
                 return -EINVAL;
 
         ret = kasan_populate_early_shadow(shadow_start, shadow_end);
         if (ret)
-                kasan_remove_zero_shadow(shadow_start,
-                                        size >> KASAN_SHADOW_SCALE_SHIFT);
+                kasan_remove_zero_shadow(start, size);
         return ret;
 }
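
Two recurring conversions in this diff deserve a note. First, direct PTE dereferences (*pte) become ptep_get(pte) accesses. Second, pgd_populate()/p4d_populate() calls on init_mm become pgd_populate_kernel()/p4d_populate_kernel(), which additionally take the virtual address being mapped. The sketch below illustrates the intent of both helpers; it is a simplified reading assuming the generic fallbacks, not the verbatim definition used by every architecture:

/*
 * Simplified sketches of the helpers adopted above; the real
 * definitions live in include/linux/pgtable.h and
 * include/linux/pgalloc.h and may be overridden per architecture.
 */

/*
 * ptep_get() (generic fallback): load the PTE with READ_ONCE() so the
 * compiler can neither tear nor repeat the read while hardware (e.g.
 * the CPU setting access/dirty bits) may update the entry concurrently.
 * This is why the "*pte" dereferences are converted.
 */
static inline pte_t ptep_get(pte_t *ptep)
{
        return READ_ONCE(*ptep);
}

/*
 * pgd_populate_kernel() (sketch): populate a kernel (init_mm) PGD
 * entry, then propagate the new top-level entry to the other page
 * tables mirroring the kernel address space on architectures that
 * require explicit synchronization. The extra "addr" argument exists
 * for this synchronization step.
 */
static inline void pgd_populate_kernel(unsigned long addr, pgd_t *pgd,
                                       p4d_t *p4d)
{
        pgd_populate(&init_mm, pgd, p4d);
        if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED)
                arch_sync_kernel_mappings(addr, addr);
}

Likewise, KASAN_MEMORY_PER_SHADOW_PAGE is a named constant for what the old checks spelled out as KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE: the span of memory covered by a single page of shadow.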
