From 99f3fe416c71aa3d5aba69174c274309ededfd42 Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Thu, 21 Dec 2023 21:04:48 +0100
Subject: kasan: clean up is_kfence_address checks

1. Do not untag addresses that are passed to is_kfence_address: it
   tolerates tagged addresses.

2. Move is_kfence_address checks from internal KASAN functions
   (kasan_poison/unpoison, etc.) to external-facing ones.

   Note that kasan_poison/unpoison are never called outside of KASAN/slab
   code anymore; the comment is wrong, so drop it.

3. Simplify/reorganize the code around the updated checks.

Link: https://lkml.kernel.org/r/1065732315ef4e141b6177d8f612232d4d5bc0ab.1703188911.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Dmitry Vyukov
Cc: Marco Elver
Signed-off-by: Andrew Morton
---
 mm/kasan/common.c | 26 +++++++++++++++++---------
 mm/kasan/kasan.h  | 16 ++--------------
 mm/kasan/shadow.c | 12 ------------
 3 files changed, 19 insertions(+), 35 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index f4255e807b74..86adf80cc11a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -79,6 +79,9 @@ EXPORT_SYMBOL(kasan_disable_current);
 
 void __kasan_unpoison_range(const void *address, size_t size)
 {
+	if (is_kfence_address(address))
+		return;
+
 	kasan_unpoison(address, size, false);
 }
 
@@ -218,9 +221,6 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
 	tagged_object = object;
 	object = kasan_reset_tag(object);
 
-	if (is_kfence_address(object))
-		return false;
-
 	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
 		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
 		return true;
@@ -247,7 +247,12 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
 bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 				unsigned long ip, bool init)
 {
-	bool buggy_object = poison_slab_object(cache, object, ip, init);
+	bool buggy_object;
+
+	if (is_kfence_address(object))
+		return false;
+
+	buggy_object = poison_slab_object(cache, object, ip, init);
 
 	return buggy_object ? true : kasan_quarantine_put(cache, object);
 }
@@ -359,7 +364,7 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
 	if (unlikely(object == NULL))
 		return NULL;
 
-	if (is_kfence_address(kasan_reset_tag(object)))
+	if (is_kfence_address(object))
 		return (void *)object;
 
 	/* The object has already been unpoisoned by kasan_slab_alloc(). */
@@ -417,7 +422,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
-	if (is_kfence_address(kasan_reset_tag(object)))
+	if (is_kfence_address(object))
 		return (void *)object;
 
 	/*
@@ -483,6 +488,9 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 		return true;
 	}
 
+	if (is_kfence_address(ptr))
+		return false;
+
 	slab = folio_slab(folio);
 	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
 }
@@ -492,9 +500,6 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
 	struct slab *slab;
 	gfp_t flags = 0; /* Might be executing under a lock. */
 
-	if (is_kfence_address(kasan_reset_tag(ptr)))
-		return;
-
 	slab = virt_to_slab(ptr);
 
 	/*
@@ -507,6 +512,9 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
 		return;
 	}
 
+	if (is_kfence_address(ptr))
+		return;
+
 	/* Unpoison the object and save alloc info for non-kmalloc() allocations.
 	 */
 	unpoison_slab_object(slab->slab_cache, ptr, size, flags);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c34511090d7..5fbcc1b805bc 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,35 +466,23 @@ static inline u8 kasan_random_tag(void) { return 0; }
 
 static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
-	addr = kasan_reset_tag(addr);
-
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	if (WARN_ON(size & KASAN_GRANULE_MASK))
 		return;
 
-	hw_set_mem_tag_range((void *)addr, size, value, init);
+	hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
 }
 
 static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 {
 	u8 tag = get_tag(addr);
 
-	addr = kasan_reset_tag(addr);
-
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
-	hw_set_mem_tag_range((void *)addr, size, tag, init);
+	hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
 }
 
 static inline bool kasan_byte_accessible(const void *addr)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 0154d200be40..30625303d01a 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -135,10 +135,6 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 	 */
 	addr = kasan_reset_tag(addr);
 
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	if (WARN_ON(size & KASAN_GRANULE_MASK))
@@ -175,14 +171,6 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
 	 */
 	addr = kasan_reset_tag(addr);
 
-	/*
-	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
-	 * that calls to ksize(), where size is not a multiple of machine-word
-	 * size, would otherwise poison the invalid portion of the word.
-	 */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 
--
cgit
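
To see the shape of the change outside the kernel tree, here is a minimal
userspace C sketch of the pattern the patch converges on: the pool check
tolerates tagged pointers, so the external-facing entry point performs it
once, and the internal helper neither untags nor re-checks. Everything below
(the top-byte tag scheme, the pool, reset_tag(), slab_free(),
poison_slab_object()) is a simplified, hypothetical stand-in, and the toy
is_kfence_address() gets its tag tolerance by untagging internally, which is
merely one way to obtain the property the commit message relies on. The
sketch assumes 64-bit pointers.

/* kfence_check_sketch.c -- all names are illustrative stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56                        /* top-byte tag, MTE-style */
#define TAG_MASK  ((uintptr_t)0xff << TAG_SHIFT)

static char pool[4096];                     /* stand-in for the KFENCE pool */

/* Strip the top-byte tag; plays the role of kasan_reset_tag(). */
static void *reset_tag(const void *addr)
{
	return (void *)((uintptr_t)addr & ~TAG_MASK);
}

/*
 * Pool range check. Untagging inside the check is what lets callers pass
 * tagged or untagged pointers alike -- the property that allows dropping
 * kasan_reset_tag() at the call sites.
 */
static bool is_kfence_address(const void *addr)
{
	return (uintptr_t)reset_tag(addr) - (uintptr_t)pool < sizeof(pool);
}

/* Internal helper: it now assumes the KFENCE case was already filtered out. */
static bool poison_slab_object(void *object)
{
	printf("poisoning %p\n", reset_tag(object));
	return false;                       /* no invalid free detected */
}

/* External-facing entry point: the single, hoisted KFENCE check. */
static bool slab_free(void *object)
{
	if (is_kfence_address(object))
		return false;               /* KFENCE-managed: nothing to poison */
	return poison_slab_object(object);
}

int main(void)
{
	char obj;
	void *tagged = (void *)((uintptr_t)pool | ((uintptr_t)0xab << TAG_SHIFT));

	slab_free(&obj);   /* ordinary object: reaches poison_slab_object() */
	slab_free(tagged); /* pool address carrying a tag: skipped up front */
	return 0;
}

The call-site consequence mirrors points 1 and 2 of the commit message: the
boundary function pays for the KFENCE branch once, the internal fast paths
stay free of both the branch and redundant untagging, and untagging is
deferred to the point of actual use, as the kasan.h hunks do with
hw_set_mem_tag_range(kasan_reset_tag(addr), ...).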