Diffstat (limited to 'mm/kasan/kasan.h')
-rw-r--r-- | mm/kasan/kasan.h | 36
1 file changed, 16 insertions, 20 deletions
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d0f172f2b978..129178be5e64 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,7 +6,6 @@
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
-#include <linux/spinlock.h>
 #include <linux/stackdepot.h>
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
 struct kasan_alloc_meta {
 	struct kasan_track alloc_track;
 	/* Free track is stored in kasan_free_meta. */
-	/*
-	 * aux_lock protects aux_stack from accesses from concurrent
-	 * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
-	 * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
-	 * non-sleepable contexts.
-	 */
-	raw_spinlock_t aux_lock;
 	depot_stack_handle_t aux_stack[2];
 };
 
@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 						const void *object);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
 #else
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
-static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
@@ -511,18 +501,18 @@ static inline bool kasan_byte_accessible(const void *addr)
 
 /**
  * kasan_poison - mark the memory range as inaccessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size, must be aligned to KASAN_GRANULE_SIZE
- * @value - value that's written to metadata for the range
- * @init - whether to initialize the memory range (only for hardware tag-based)
+ * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size, must be aligned to KASAN_GRANULE_SIZE
+ * @value: value that's written to metadata for the range
+ * @init: whether to initialize the memory range (only for hardware tag-based)
  */
 void kasan_poison(const void *addr, size_t size, u8 value, bool init);
 
 /**
  * kasan_unpoison - mark the memory range as accessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size, can be unaligned
- * @init - whether to initialize the memory range (only for hardware tag-based)
+ * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size, can be unaligned
+ * @init: whether to initialize the memory range (only for hardware tag-based)
  *
  * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
  * marking the range.
@@ -540,8 +530,8 @@ bool kasan_byte_accessible(const void *addr);
 /**
  * kasan_poison_last_granule - mark the last granule of the memory range as
  * inaccessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size
  *
  * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
@@ -565,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; }
 void kasan_kunit_test_suite_start(void);
 void kasan_kunit_test_suite_end(void);
 
+#ifdef CONFIG_RUST
+char kasan_test_rust_uaf(void);
+#else
+static inline char kasan_test_rust_uaf(void) { return '\0'; }
+#endif
+
 #else /* CONFIG_KASAN_KUNIT_TEST */
 
 static inline void kasan_kunit_test_suite_start(void) { }
@@ -572,7 +568,7 @@ static inline void kasan_kunit_test_suite_end(void) { }
 
 #endif /* CONFIG_KASAN_KUNIT_TEST */
 
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 bool kasan_save_enable_multi_shot(void);
 void kasan_restore_multi_shot(bool enabled);
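
For context, the kasan_test_rust_uaf() declaration added above is only useful when a KASAN KUnit test case calls it under CONFIG_RUST. Below is a minimal sketch of such a caller; the test name rust_uaf, the KUNIT_EXPECT_KASAN_FAIL() and KASAN_TEST_NEEDS_CONFIG_ON() helpers, and its placement alongside the other KASAN KUnit cases are assumptions for illustration, not part of this diff.

/*
 * Sketch of a KUnit caller for the new hook (assumed names/helpers, not
 * taken from this diff). KUNIT_EXPECT_KASAN_FAIL() and
 * KASAN_TEST_NEEDS_CONFIG_ON() are local helpers of the KASAN KUnit
 * suite, so this would live next to the existing test cases.
 */
static void rust_uaf(struct kunit *test)
{
	/* Skip the case when the Rust helper is compiled out (CONFIG_RUST=n). */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);

	/*
	 * kasan_test_rust_uaf() performs a use-after-free from Rust code;
	 * KASAN is expected to catch and report the bad access.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
}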