Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  51
1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 829a1f08e8a2..3520acaf9afa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -845,10 +845,12 @@ static int disable_higher_order_debug;
 static inline void metadata_access_enable(void)
 {
 	kasan_disable_current();
+	kmsan_disable_current();
 }
 
 static inline void metadata_access_disable(void)
 {
+	kmsan_enable_current();
 	kasan_enable_current();
 }
 
@@ -1153,7 +1155,13 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 	unsigned int poison_size = s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
-		memset(p - s->red_left_pad, val, s->red_left_pad);
+		/*
+		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
+		 * the shadow makes it possible to distinguish uninit-value
+		 * from use-after-free.
+		 */
+		memset_no_sanitize_memory(p - s->red_left_pad, val,
+					  s->red_left_pad);
 
 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
 			/*
@@ -1166,12 +1174,13 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 	}
 
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, poison_size - 1);
-		p[poison_size - 1] = POISON_END;
+		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
+		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + poison_size, val, s->inuse - poison_size);
+		memset_no_sanitize_memory(p + poison_size, val,
+					  s->inuse - poison_size);
 }
 
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -1181,9 +1190,16 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 	memset(from, data, to - from);
 }
 
-static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
-				  u8 *object, char *what,
-				  u8 *start, unsigned int value, unsigned int bytes)
+#ifdef CONFIG_KMSAN
+#define pad_check_attributes noinline __no_kmsan_checks
+#else
+#define pad_check_attributes
+#endif
+
+static pad_check_attributes int
+check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
+		       u8 *object, char *what,
+		       u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
@@ -1273,7 +1289,8 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
 }
 
 /* Check the pad bytes at the end of a slab page */
-static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
+static pad_check_attributes void
+slab_pad_check(struct kmem_cache *s, struct slab *slab)
 {
 	u8 *start;
 	u8 *fault;
@@ -2021,7 +2038,7 @@ static inline bool need_slab_obj_ext(void)
 		return true;
 
 	/*
-	 * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally
+	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
 	 * inside memcg_slab_post_alloc_hook. No other users for now.
 	 */
 	return false;
@@ -2126,7 +2143,7 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 
 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
 
@@ -2168,7 +2185,7 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
 }
 
-#else /* CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG */
 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
 					      struct list_lru *lru,
 					      gfp_t flags, size_t size,
@@ -2181,7 +2198,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 					struct slab *slab, void **p,
 					int objects)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
@@ -3914,14 +3931,6 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 			0, sizeof(void *));
 }
 
-noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
-{
-	if (__should_failslab(s, gfpflags))
-		return -ENOMEM;
-	return 0;
-}
-ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
-
 static __fastpath_inline
 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
@@ -4465,7 +4474,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	do_slab_free(s, slab, object, object, 1, addr);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 /* Do not inline the rare memcg charging failed path into the allocation path */
 static noinline
 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
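
For context on the KMSAN pieces above: memset_no_sanitize_memory() is introduced
elsewhere in this series as a memset variant whose stores KMSAN does not track,
and pad_check_attributes suppresses KMSAN checks in the pad-checking helpers,
which deliberately read bytes that may carry an uninitialized shadow. The sketch
below illustrates the idea behind the first helper, assuming the
__no_sanitize_memory function attribute; it is a simplified rendering for
illustration, not necessarily the series' exact definition.

	/*
	 * Sketch: a memset invisible to KMSAN. Because the function is
	 * compiled without KMSAN instrumentation (__no_sanitize_memory),
	 * its stores do not mark the target bytes as initialized.
	 * Poisoning an object through this helper therefore preserves the
	 * object's KMSAN shadow: bytes the user never wrote keep their
	 * "uninitialized" state, so a later bad read is reported as an
	 * uninit-value instead of being masked by the poison pattern.
	 */
	__no_sanitize_memory
	static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
	{
		return memset(s, c, n);
	}

The noinline in pad_check_attributes serves the complementary purpose: function
attributes like __no_kmsan_checks apply per function, so if
check_bytes_and_report() or slab_pad_check() were inlined into an instrumented
caller, their intentional reads of possibly-uninitialized pad bytes could still
trigger reports.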