author	Andrey Konovalov <andreyknvl@google.com>	2020-12-22 12:02:52 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-22 12:55:08 -0800
commit	c0054c565ae598073d6c27762c7d4f7de49a45d9 (patch)
tree	26e8cbacc152f42d8d2655eb064c6cebb1dc0dae /mm/kasan/common.c
parent	77f57c983065d0569ee1b4af80f07224b439af57 (diff)
kasan: inline kasan_reset_tag for tag-based modes
Using kasan_reset_tag() currently results in a function call. As it's called quite often from the allocator code, this leads to a noticeable slowdown. Move it to include/linux/kasan.h and turn it into a static inline function. Also remove the now unneeded reset_tag() internal KASAN macro and use kasan_reset_tag() instead.

Link: https://lkml.kernel.org/r/6940383a3a9dfb416134d338d8fac97a9ebb8686.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I4d2061acfe91d480a75df00b07c22d8494ef14b5
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
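For orientation, a minimal standalone sketch of what the moved helper buys. It assumes arm64-style top-byte pointer tags (bits 63:56) and uses the hypothetical name kasan_reset_tag_sketch(); the real definition added to include/linux/kasan.h goes through an arch hook and may differ in detail. The point is that, as a static inline, the tag reset collapses to a single bitwise operation the compiler folds into each caller, instead of an out-of-line call on every allocator fast path.

```c
#include <stdint.h>

/*
 * Sketch only (assumption: 64-bit pointers with the KASAN tag kept in the
 * top byte, as with arm64 top-byte-ignore). Resetting the tag forces the
 * top byte to 0xff, the match-all value, so the address can be used for
 * metadata lookups regardless of which tag the pointer carried.
 */
static inline void *kasan_reset_tag_sketch(const void *addr)
{
	return (void *)((uint64_t)addr | (0xffULL << 56));
}
```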
Diffstat (limited to 'mm/kasan/common.c')
-rw-r--r--	mm/kasan/common.c	6
1 file changed, 3 insertions, 3 deletions
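The second hunk below also reads the pointer's tag with get_tag() before stripping it. As a complementary sketch under the same top-byte assumption (again hypothetical, not the kernel's actual arch-specific definition in mm/kasan/kasan.h), extracting the tag is just the shift that mirrors the reset above:

```c
#include <stdint.h>

/* Sketch only: read the 8-bit KASAN tag back out of the pointer's top byte. */
static inline uint8_t get_tag_sketch(const void *addr)
{
	return (uint8_t)((uint64_t)addr >> 56);
}
```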
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index b71dfe7c5059..780ec27459ab 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -179,14 +179,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 					      const void *object)
 {
-	return (void *)reset_tag(object) + cache->kasan_info.alloc_meta_offset;
+	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
 }
 
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 					    const void *object)
 {
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
-	return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset;
+	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
 }
 
 void kasan_poison_slab(struct page *page)
@@ -283,7 +283,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 	tag = get_tag(object);
 	tagged_object = object;
-	object = reset_tag(object);
+	object = kasan_reset_tag(object);
 
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {