Diffstat (limited to 'mm/kasan/common.c')

 -rw-r--r--  mm/kasan/common.c | 60 ++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 22 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ed4873e18c75..1d27f1bd260b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -32,6 +32,15 @@
 #include "kasan.h"
 #include "../slab.h"
 
+#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * Definition of the unified static key declared in kasan-enabled.h.
+ * This provides consistent runtime enable/disable across KASAN modes.
+ */
+DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
+EXPORT_SYMBOL_GPL(kasan_flag_enabled);
+#endif
+
 struct slab *kasan_addr_to_slab(const void *addr)
 {
         if (virt_addr_valid(addr))
@@ -230,16 +239,12 @@ static bool check_slab_allocation(struct kmem_cache *cache, void *object,
 }
 
 static inline void poison_slab_object(struct kmem_cache *cache, void *object,
-                                      bool init, bool still_accessible)
+                                      bool init)
 {
         void *tagged_object = object;
 
         object = kasan_reset_tag(object);
 
-        /* RCU slabs could be legally used after free within the RCU period. */
-        if (unlikely(still_accessible))
-                return;
-
         kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                         KASAN_SLAB_FREE, init);
 
@@ -250,18 +255,36 @@ static inline void poison_slab_object(struct kmem_cache *cache, void *object,
 bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
                                 unsigned long ip)
 {
-        if (!kasan_arch_is_ready() || is_kfence_address(object))
+        if (is_kfence_address(object))
                 return false;
         return check_slab_allocation(cache, object, ip);
 }
 
 bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
-                       bool still_accessible)
+                       bool still_accessible, bool no_quarantine)
 {
-        if (!kasan_arch_is_ready() || is_kfence_address(object))
+        if (is_kfence_address(object))
                 return false;
 
-        poison_slab_object(cache, object, init, still_accessible);
+        /*
+         * If this point is reached with an object that must still be
+         * accessible under RCU, we can't poison it; in that case, also skip the
+         * quarantine. This should mostly only happen when CONFIG_SLUB_RCU_DEBUG
+         * has been disabled manually.
+         *
+         * Putting the object on the quarantine wouldn't help catch UAFs (since
+         * we can't poison it here), and it would mask bugs caused by
+         * SLAB_TYPESAFE_BY_RCU users not being careful enough about object
+         * reuse; so overall, putting the object into the quarantine here would
+         * be counterproductive.
+         */
+        if (still_accessible)
+                return false;
+
+        poison_slab_object(cache, object, init);
+
+        if (no_quarantine)
+                return false;
 
         /*
          * If the object is put into quarantine, do not let slab put the object
@@ -282,9 +305,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-        if (!kasan_arch_is_ready())
-                return false;
-
         if (ptr != page_address(virt_to_head_page(ptr))) {
                 kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
                 return true;
@@ -497,29 +517,25 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
-        struct folio *folio = virt_to_folio(ptr);
+        struct page *page = virt_to_page(ptr);
         struct slab *slab;
 
-        /*
-         * This function can be called for large kmalloc allocation that get
-         * their memory from page_alloc. Thus, the folio might not be a slab.
-         */
-        if (unlikely(!folio_test_slab(folio))) {
+        if (unlikely(PageLargeKmalloc(page))) {
                 if (check_page_allocation(ptr, ip))
                         return false;
-                kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+                kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
                 return true;
         }
 
-        if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+        if (is_kfence_address(ptr))
                 return true;
 
-        slab = folio_slab(folio);
+        slab = page_slab(page);
 
         if (check_slab_allocation(slab->slab_cache, ptr, ip))
                 return false;
 
-        poison_slab_object(slab->slab_cache, ptr, false, false);
+        poison_slab_object(slab->slab_cache, ptr, false);
         return true;
 }
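For context, the kasan_flag_enabled definition added at the top of this diff is the standard jump-label pattern: the key stays a compile-time no-op branch until KASAN is actually initialized. Below is a minimal sketch of how such a key is typically consumed; it assumes a kasan_enabled() helper wrapping static_branch_likely() (as the comment's reference to kasan-enabled.h suggests), and the hook/init function names are illustrative only, not taken from this patch.

/*
 * Sketch only: typical consumption of a static key like kasan_flag_enabled.
 * Helper and function names below are illustrative.
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/types.h>

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	/* Patched-in branch: nearly free while the key is disabled. */
	return static_branch_likely(&kasan_flag_enabled);
}

/* Arch/boot code flips the key once KASAN is ready to handle accesses. */
static void __init example_kasan_init_late(void)
{
	static_branch_enable(&kasan_flag_enabled);
}

/* Hot paths bail out cheaply while KASAN is still disabled. */
static void example_kasan_hook(const void *addr, size_t size)
{
	if (!kasan_enabled())
		return;
	/* ... perform the actual check/poisoning here ... */
}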
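The long comment in __kasan_slab_free above hinges on the SLAB_TYPESAFE_BY_RCU contract: objects freed from such a cache may legitimately be read until an RCU grace period elapses, so KASAN can neither poison nor quarantine them at free time. A hedged sketch of the caller-side pattern that motivates the still_accessible path follows; the cache, struct, and function names are made up for illustration, and a real user would also re-validate the object (refcount or key re-check) after the lockless lookup.

/* Sketch only: why SLAB_TYPESAFE_BY_RCU objects stay readable after free. */
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int key;
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	/*
	 * SLAB_TYPESAFE_BY_RCU: freed objects keep their type and stay
	 * mapped until a grace period has passed, so RCU readers may
	 * still dereference them right after kmem_cache_free().
	 */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_TYPESAFE_BY_RCU, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static bool foo_reader(struct foo *f)
{
	bool match;

	rcu_read_lock();
	/* May race with a concurrent free; the read itself remains legal. */
	match = (READ_ONCE(f->key) == 42);
	rcu_read_unlock();
	return match;
}

static void foo_release(struct foo *f)
{
	/* Readers inside an RCU section may still touch *f after this. */
	kmem_cache_free(foo_cache, f);
}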
