-rw-r--r--  include/linux/kasan.h |  4
-rw-r--r--  mm/kasan/kasan.c      | 12
-rw-r--r--  mm/slab.c             |  2
-rw-r--r--  mm/slub.c             |  2
4 files changed, 10 insertions, 10 deletions
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d6459bd1376d..de784fd11d12 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);

-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_cache_create(struct kmem_cache *cache,
- size_t *size,
+ unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
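For context, a minimal sketch (not part of this patch) of how a slab allocator is expected to call kasan_cache_create() during cache setup: the size and flags are passed by reference so KASAN can enlarge the object size to make room for its redzones and metadata, and with this change the size travels as an unsigned int rather than a size_t. The example_cache_setup() wrapper below is hypothetical.

	#include <linux/kasan.h>
	#include <linux/slab.h>

	/* Hypothetical wrapper, for illustration only. */
	static void example_cache_setup(struct kmem_cache *cache,
					unsigned int size, slab_flags_t flags)
	{
		/* KASAN may grow 'size' and adjust 'flags' in place. */
		kasan_cache_create(cache, &size, &flags);

		/* ... the allocator then lays out the cache using the updated size ... */
	}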
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index e13d911251e7..f7a5e1d1ba87 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
* Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
* For larger allocations larger redzones are used.
*/
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
{
- int rz =
+ return
object_size <= 64 - 16 ? 16 :
object_size <= 128 - 32 ? 32 :
object_size <= 512 - 64 ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t object_size)
object_size <= (1 << 14) - 256 ? 256 :
object_size <= (1 << 15) - 512 ? 512 :
object_size <= (1 << 16) - 1024 ? 1024 : 2048;
- return rz;
}

-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags)
{
+ unsigned int orig_size = *size;
int redzone_adjust;
- int orig_size = *size;

/* Add alloc meta. */
cache->kasan_info.alloc_meta_offset = *size;
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
if (redzone_adjust > 0)
*size += redzone_adjust;

- *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+ *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+ max(*size, cache->object_size +
optimal_redzone(cache->object_size)));

/*
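As a standalone illustration (not part of the patch) of the adaptive redzone policy above: the redzone grows with the object size, from 16 bytes for the smallest objects up to 2048 bytes for the largest, and kasan_cache_create() then clamps the padded size to KMALLOC_MAX_SIZE using min_t(unsigned int, ...), which compares both values as unsigned int now that *size is no longer a size_t. The userspace demo below mirrors the full cascade from the kernel's optimal_redzone(), including the 4096-byte rung that falls between the hunks shown here; main() and the sample object sizes are illustrative only.

	#include <stdio.h>

	/* Mirrors the kernel's optimal_redzone() selection as a plain function. */
	static unsigned int optimal_redzone(unsigned int object_size)
	{
		return
			object_size <= 64 - 16 ? 16 :
			object_size <= 128 - 32 ? 32 :
			object_size <= 512 - 64 ? 64 :
			object_size <= 4096 - 128 ? 128 :
			object_size <= (1 << 14) - 256 ? 256 :
			object_size <= (1 << 15) - 512 ? 512 :
			object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	}

	int main(void)
	{
		/* A 100-byte object gets a 64-byte redzone, a 1000-byte object 128 bytes. */
		printf("%u %u\n", optimal_redzone(100), optimal_redzone(1000));
		return 0;
	}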
diff --git a/mm/slab.c b/mm/slab.c
index 063a02d79c8e..fb106e8277b7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1994,7 +1994,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
size_t ralign = BYTES_PER_WORD;
gfp_t gfp;
int err;
- size_t size = cachep->size;
+ unsigned int size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
diff --git a/mm/slub.c b/mm/slub.c
index b4a739f8f84d..dfead847961c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3458,7 +3458,7 @@ static void set_cpu_partial(struct kmem_cache *s)
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
slab_flags_t flags = s->flags;
- size_t size = s->object_size;
+ unsigned int size = s->object_size;
int order;

/*