Diffstat (limited to 'mm/kasan/generic.c')
-rw-r--r-- | mm/kasan/generic.c | 112 |
1 file changed, 24 insertions, 88 deletions
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index df6627f62402..d54e89f8c3e7 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -334,14 +334,6 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
-/* Only allow cache merging when no per-object metadata is present. */
-slab_flags_t kasan_never_merge(void)
-{
-	if (!kasan_requires_meta())
-		return 0;
-	return SLAB_KASAN;
-}
-
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -370,15 +362,13 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 
 	/*
-	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
-	 * and that thus have per-object metadata.
-	 * Currently this flag is used in two places:
-	 * 1. In slab_ksize() to account for per-object metadata when
-	 *    calculating the size of the accessible memory within the object.
-	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
-	 *    caches with per-object metadata.
+	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
+	 * that thus have per-object metadata. Currently, this flag is used in
+	 * slab_ksize() to account for per-object metadata when calculating the
+	 * size of the accessible memory within the object. Additionally, we use
+	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
 	 */
-	*flags |= SLAB_KASAN;
+	*flags |= SLAB_KASAN | SLAB_NO_MERGE;
 
 	ok_size = *size;
 
@@ -402,9 +392,12 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
 		 *    be touched after it was freed, or
 		 * 2. Object has a constructor, which means it's expected to
-		 *    retain its content until the next allocation.
+		 *    retain its content until the next allocation, or
+		 * 3. It is from a kmalloc cache which enables the debug option
+		 *    to store original size.
 		 */
-		if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
+		if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+		    slub_debug_orig_size(cache)) {
 			cache->kasan_info.free_meta_offset = *size;
 			*size += sizeof(struct kasan_free_meta);
 			goto free_meta_added;
@@ -485,16 +478,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 	if (alloc_meta) {
 		/* Zero out alloc meta to mark it as invalid. */
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
-
-		/*
-		 * Prepare the lock for saving auxiliary stack traces.
-		 * Temporarily disable KASAN bug reporting to allow instrumented
-		 * raw_spin_lock_init to access aux_lock, which resides inside
-		 * of a redzone.
-		 */
-		kasan_disable_current();
-		raw_spin_lock_init(&alloc_meta->aux_lock);
-		kasan_enable_current();
 	}
 
 	/*
@@ -506,47 +489,23 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 
 static void release_alloc_meta(struct kasan_alloc_meta *meta)
 {
-	/* Evict the stack traces from stack depot. */
-	stack_depot_put(meta->alloc_track.stack);
-	stack_depot_put(meta->aux_stack[0]);
-	stack_depot_put(meta->aux_stack[1]);
-
-	/*
-	 * Zero out alloc meta to mark it as invalid but keep aux_lock
-	 * initialized to avoid having to reinitialize it when another object
-	 * is allocated in the same slot.
-	 */
-	__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
-	__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+	/* Zero out alloc meta to mark it as invalid. */
+	__memset(meta, 0, sizeof(*meta));
 }
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
+	if (!kasan_arch_is_ready())
+		return;
+
 	/* Check if free meta is valid. */
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;
 
-	/* Evict the stack trace from the stack depot. */
-	stack_depot_put(meta->free_track.stack);
-
 	/* Mark free meta as invalid. */
 	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
 }
 
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
-{
-	struct kasan_alloc_meta *alloc_meta;
-	struct kasan_free_meta *free_meta;
-
-	alloc_meta = kasan_get_alloc_meta(cache, object);
-	if (alloc_meta)
-		release_alloc_meta(alloc_meta);
-
-	free_meta = kasan_get_free_meta(cache, object);
-	if (free_meta)
-		release_free_meta(object, free_meta);
-}
-
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
 	struct kasan_cache *info = &cache->kasan_info;
@@ -565,14 +524,16 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 			sizeof(struct kasan_free_meta) : 0);
 }
 
-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+/*
+ * This function avoids dynamic memory allocations and thus can be called from
+ * contexts that do not allow allocating memory.
+ */
+void kasan_record_aux_stack(void *addr)
 {
 	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
-	depot_stack_handle_t new_handle, old_handle;
-	unsigned long flags;
 
 	if (is_kfence_address(addr) || !slab)
 		return;
@@ -583,33 +544,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	if (!alloc_meta)
 		return;
 
-	new_handle = kasan_save_stack(0, depot_flags);
-
-	/*
-	 * Temporarily disable KASAN bug reporting to allow instrumented
-	 * spinlock functions to access aux_lock, which resides inside of a
-	 * redzone.
-	 */
-	kasan_disable_current();
-	raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
-	old_handle = alloc_meta->aux_stack[1];
 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-	alloc_meta->aux_stack[0] = new_handle;
-	raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
-	kasan_enable_current();
-
-	stack_depot_put(old_handle);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
-	return __kasan_record_aux_stack(addr,
-			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
-	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+	alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -620,7 +556,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 	if (!alloc_meta)
 		return;
 
-	/* Evict previous stack traces (might exist for krealloc or mempool). */
+	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
 	release_alloc_meta(alloc_meta);
 
 	kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -634,7 +570,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
 	if (!free_meta)
 		return;
 
-	/* Evict previous stack trace (might exist for mempool). */
+	/* Invalidate previous stack trace (might exist for mempool). */
 	release_free_meta(object, free_meta);
 
 	kasan_save_track(&free_meta->free_track, 0);
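
As context for the kasan_record_aux_stack() change above: with stack depot eviction gone, recording an auxiliary stack reduces to a lock-free two-slot rotation. The sketch below is illustrative only; struct alloc_meta, save_stack(), and record_aux_stack() are stand-in names for the kernel's kasan_alloc_meta, kasan_save_stack(), and kasan_record_aux_stack(), not kernel code.

#include <stdio.h>

/* Illustrative stand-in for the kernel's depot_stack_handle_t. */
typedef unsigned int stack_handle_t;

/* Toy counterpart of struct kasan_alloc_meta's aux_stack pair. */
struct alloc_meta {
	stack_handle_t aux_stack[2];
};

/* Stand-in for kasan_save_stack(0, 0): hands out a fresh handle. */
static stack_handle_t save_stack(void)
{
	static stack_handle_t next = 1;
	return next++;
}

/*
 * Mirrors the rotation in the simplified kasan_record_aux_stack():
 * the newest stack goes to slot 0, the previous one shifts to slot 1,
 * and the oldest is simply overwritten. Since handles no longer need
 * to be evicted, there is no stack_depot_put() and no aux_lock, which
 * is why the function stays safe in contexts that cannot allocate.
 */
static void record_aux_stack(struct alloc_meta *meta)
{
	meta->aux_stack[1] = meta->aux_stack[0];
	meta->aux_stack[0] = save_stack();
}

int main(void)
{
	struct alloc_meta meta = { { 0, 0 } };

	record_aux_stack(&meta);	/* aux_stack = {1, 0} */
	record_aux_stack(&meta);	/* aux_stack = {2, 1} */
	record_aux_stack(&meta);	/* aux_stack = {3, 2} */
	printf("aux0=%u aux1=%u\n", meta.aux_stack[0], meta.aux_stack[1]);
	return 0;
}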
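
Likewise, release_free_meta() now tracks free meta validity entirely through the object's shadow byte, so releasing it is a single byte write with no stack depot handle to put. A minimal sketch, assuming a toy one-byte-per-object shadow array and made-up marker values; the kernel's actual KASAN_SLAB_* constants and kasan_mem_to_shadow() address arithmetic differ.

#include <stdio.h>

/* Illustrative marker values, not the kernel's shadow encodings. */
#define SLAB_FREE	0xFE	/* freed, free meta no longer valid */
#define SLAB_FREE_META	0xFA	/* freed, free meta is valid */

/* Toy shadow memory: one byte per object slot. */
static unsigned char shadow[16];

/*
 * Mirrors the simplified release_free_meta(): validity of the free
 * meta lives entirely in the object's shadow byte, so invalidating
 * it is a single store.
 */
static void release_free_meta(unsigned int object)
{
	/* Check if free meta is valid. */
	if (shadow[object] != SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	shadow[object] = SLAB_FREE;
}

int main(void)
{
	shadow[3] = SLAB_FREE_META;
	release_free_meta(3);
	printf("shadow[3] = 0x%02X\n", shadow[3]);	/* prints 0xFE */
	return 0;
}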