author		Andrey Konovalov <andreyknvl@google.com>	2023-12-19 23:29:03 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-29 11:58:40 -0800
commit		1ce9a0523938f87dd8505233cc3445f8e2d8dcee (patch)
tree		bb4ea2cab493a33ff5fceaca86dc48c93f4fba06
parent		86b15969831bde23c96de00db46687762a6e9e7d (diff)
kasan: rename and document kasan_(un)poison_object_data
Rename kasan_unpoison_object_data to kasan_unpoison_new_object and add a
documentation comment. Do the same for kasan_poison_object_data.

The new names and the comments should make it clear to users that these
hooks are intended for internal use by the slab allocator.

The following patch will remove non-slab-internal uses of these hooks.

No functional changes.

[andreyknvl@google.com: update references to renamed functions in comments]
Link: https://lkml.kernel.org/r/20231221180637.105098-1-andrey.konovalov@linux.dev
Link: https://lkml.kernel.org/r/eab156ebbd635f9635ef67d1a4271f716994e628.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
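
[Editor's note] For context, the calling pattern these hooks are designed
for looks roughly like the sketch below, modeled on setup_object() in
mm/slub.c further down in this diff. This is an illustration only, not part
of the patch; the function name example_setup_object is made up, and the
sketch assumes slab-internal context where struct kmem_cache's fields are
visible:

#include <linux/kasan.h>
#include <linux/slab.h>

/* Illustrative sketch of the slab-internal pattern these hooks serve. */
static void *example_setup_object(struct kmem_cache *s, void *object)
{
	if (unlikely(s->ctor)) {
		/* Make the fresh object accessible so the ctor can write to it. */
		kasan_unpoison_new_object(s, object);
		s->ctor(object);
		/* Repoison: the object has not been handed out to a user yet. */
		kasan_poison_new_object(s, object);
	}
	return object;
}

The unpoison/poison pair brackets only the constructor call; outside that
window a new, not-yet-allocated object stays poisoned so that any stray
access is reported by KASAN.
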
-rw-r--r--  include/linux/kasan.h  35
-rw-r--r--  mm/kasan/common.c       4
-rw-r--r--  mm/kasan/shadow.c       4
-rw-r--r--  mm/slab.c              10
-rw-r--r--  mm/slub.c               4
-rw-r--r--  net/core/skbuff.c       8
6 files changed, 41 insertions, 24 deletions
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 7392c5d89b92..d49e3d4c099e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -129,20 +129,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
__kasan_poison_slab(slab);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_unpoison_object_data(cache, object);
+ __kasan_unpoison_new_object(cache, object);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_poison_object_data(cache, object);
+ __kasan_poison_new_object(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -342,9 +361,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index bf16c2dfa8e7..f4255e807b74 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -143,12 +143,12 @@ void __kasan_poison_slab(struct slab *slab)
KASAN_SLAB_REDZONE, false);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
kasan_unpoison(object, cache->object_size, false);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_REDZONE, false);
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d687f09a7ae3..0154d200be40 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -130,7 +130,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
/*
* Perform shadow offset calculation based on untagged address, as
- * some of the callers (e.g. kasan_poison_object_data) pass tagged
+ * some of the callers (e.g. kasan_poison_new_object) pass tagged
* addresses to this function.
*/
addr = kasan_reset_tag(addr);
@@ -170,7 +170,7 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
/*
* Perform shadow offset calculation based on untagged address, as
- * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+ * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
* addresses to this function.
*/
addr = kasan_reset_tag(addr);
diff --git a/mm/slab.c b/mm/slab.c
index 9ad3d0f2d1a5..773c79e153f3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2327,11 +2327,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
* They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
- kasan_unpoison_object_data(cachep,
- objp + obj_offset(cachep));
+ kasan_unpoison_new_object(cachep, objp + obj_offset(cachep));
cachep->ctor(objp + obj_offset(cachep));
- kasan_poison_object_data(
- cachep, objp + obj_offset(cachep));
+ kasan_poison_new_object(cachep, objp + obj_offset(cachep));
}
if (cachep->flags & SLAB_RED_ZONE) {
@@ -2472,9 +2470,9 @@ static void cache_init_objs(struct kmem_cache *cachep,
/* constructor could break poison info */
if (DEBUG == 0 && cachep->ctor) {
- kasan_unpoison_object_data(cachep, objp);
+ kasan_unpoison_new_object(cachep, objp);
cachep->ctor(objp);
- kasan_poison_object_data(cachep, objp);
+ kasan_poison_new_object(cachep, objp);
}
if (!shuffled)
diff --git a/mm/slub.c b/mm/slub.c
index 782bd8a6bd34..891742e5932a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1860,9 +1860,9 @@ static void *setup_object(struct kmem_cache *s, void *object)
setup_object_debug(s, object);
object = kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
- kasan_unpoison_object_data(s, object);
+ kasan_unpoison_new_object(s, object);
s->ctor(object);
- kasan_poison_object_data(s, object);
+ kasan_poison_new_object(s, object);
}
return object;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b157efea5dea..63bb6526399d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -337,7 +337,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_unpoison_object_data(skbuff_cache, skb);
+ kasan_unpoison_new_object(skbuff_cache, skb);
return skb;
}
@@ -1309,13 +1309,13 @@ static void napi_skb_cache_put(struct sk_buff *skb)
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
u32 i;
- kasan_poison_object_data(skbuff_cache, skb);
+ kasan_poison_new_object(skbuff_cache, skb);
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
- kasan_unpoison_object_data(skbuff_cache,
- nc->skb_cache[i]);
+ kasan_unpoison_new_object(skbuff_cache,
+ nc->skb_cache[i]);
kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
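
[Editor's note] The net/core/skbuff.c hunks above are an example of the
non-slab-internal use that the commit message says the following patch will
remove: the NAPI per-CPU skb cache keeps stashed objects poisoned while they
sit in the cache and has to unpoison them again before returning them to the
slab allocator in bulk. A rough sketch of that pattern, with hypothetical
names (example_cache, example_cache_put, EXAMPLE_CACHE_SIZE) and assuming
the caller handles any locking:

#include <linux/kasan.h>
#include <linux/slab.h>

#define EXAMPLE_CACHE_SIZE	64
#define EXAMPLE_CACHE_HALF	(EXAMPLE_CACHE_SIZE / 2)

struct example_cache {
	void *objs[EXAMPLE_CACHE_SIZE];
	unsigned int count;
};

static void example_cache_put(struct kmem_cache *cache,
			      struct example_cache *ec, void *obj)
{
	unsigned int i;

	/* Keep the object poisoned while it sits unused in the cache. */
	kasan_poison_new_object(cache, obj);
	ec->objs[ec->count++] = obj;

	if (unlikely(ec->count == EXAMPLE_CACHE_SIZE)) {
		/* The slab allocator expects accessible objects on free. */
		for (i = EXAMPLE_CACHE_HALF; i < EXAMPLE_CACHE_SIZE; i++)
			kasan_unpoison_new_object(cache, ec->objs[i]);
		kmem_cache_free_bulk(cache, EXAMPLE_CACHE_HALF,
				     ec->objs + EXAMPLE_CACHE_HALF);
		ec->count = EXAMPLE_CACHE_HALF;
	}
}

Poisoning on put means a use-after-free on a cached-but-logically-freed
object is still caught even though the object was never returned to the slab
allocator; that is what napi_skb_cache_put() gains from these hooks.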