author		Marco Elver <elver@google.com>		2019-07-11 20:54:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 11:05:42 -0700
commit		10d1f8cb3965a6f633bf23eb984cda552927e3a5 (patch)
tree		b7b7ea9283290f231348c7b4880ef3453e48b098
parent		bb104ed78552147bed3a981fdada622afd2084b6 (diff)
mm/slab: refactor common ksize KASAN logic into slab_common.c
This refactors common code of ksize() between the various allocators into
slab_common.c: __ksize() is the allocator-specific implementation without
instrumentation, whereas ksize() includes the required KASAN logic.

Link: http://lkml.kernel.org/r/20190626142014.141844-5-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
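
For context, a minimal usage sketch of the ksize() contract this message
describes (a hypothetical caller, not part of this patch; alloc_growable()
is an invented name for illustration):

	#include <linux/slab.h>
	#include <linux/string.h>

	/*
	 * kmalloc() may round a request up to the next cache size;
	 * ksize() reports how much of the allocation the caller may
	 * actually use. After this patch, ksize() also unpoisons the
	 * whole reported area under KASAN, so using the extra bytes
	 * does not trigger a false positive.
	 */
	static void *alloc_growable(size_t requested, size_t *usable)
	{
		void *buf = kmalloc(requested, GFP_KERNEL);

		if (!buf)
			return NULL;

		/* e.g. a 13-byte request may yield a 16-byte object */
		*usable = ksize(buf);
		memset(buf, 0, *usable);	/* whole reported area is valid */
		return buf;
	}

Callers that must avoid the instrumentation (e.g. KASAN internals) can use
the uninstrumented __ksize() introduced here instead.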
-rw-r--r--	include/linux/slab.h	|  1 +
-rw-r--r--	mm/slab.c		| 22 +++++-----------------
-rw-r--r--	mm/slab_common.c	| 26 ++++++++++++++++++++++++++
-rw-r--r--	mm/slob.c		|  4 ++--
-rw-r--r--	mm/slub.c		| 14 ++------------
5 files changed, 36 insertions(+), 31 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9449b19c5f10..98c3d12b7275 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -184,6 +184,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
+size_t __ksize(const void *);
size_t ksize(const void *);
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
diff --git a/mm/slab.c b/mm/slab.c
index db01e9aae31b..3521a351ceb5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4204,20 +4204,12 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
#endif /* CONFIG_HARDENED_USERCOPY */
/**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
+ * __ksize -- Uninstrumented ksize.
*
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
- *
- * Return: size of the actual memory used by @objp in bytes
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
*/
-size_t ksize(const void *objp)
+size_t __ksize(const void *objp)
{
struct kmem_cache *c;
size_t size;
@@ -4228,11 +4220,7 @@ size_t ksize(const void *objp)
c = virt_to_cache(objp);
size = c ? c->object_size : 0;
- /* We assume that ksize callers could use the whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_shadow(objp, size);
return size;
}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 58251ba63e4a..b7c6a40e436a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1597,6 +1597,32 @@ void kzfree(const void *p)
}
EXPORT_SYMBOL(kzfree);
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+ size_t size = __ksize(objp);
+ /*
+ * We assume that ksize callers could use whole allocated area,
+ * so we need to unpoison this area.
+ */
+ kasan_unpoison_shadow(objp, size);
+ return size;
+}
+EXPORT_SYMBOL(ksize);
+
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
diff --git a/mm/slob.c b/mm/slob.c
index 84aefd9b91ee..7f421d0ca9ab 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -527,7 +527,7 @@ void kfree(const void *block)
EXPORT_SYMBOL(kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
-size_t ksize(const void *block)
+size_t __ksize(const void *block)
{
struct page *sp;
int align;
@@ -545,7 +545,7 @@ size_t ksize(const void *block)
m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
diff --git a/mm/slub.c b/mm/slub.c
index d46a91759b96..5e217653286c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3895,7 +3895,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
}
#endif /* CONFIG_HARDENED_USERCOPY */
-static size_t __ksize(const void *object)
+size_t __ksize(const void *object)
{
struct page *page;
@@ -3911,17 +3911,7 @@ static size_t __ksize(const void *object)
return slab_ksize(page->slab_cache);
}
-
-size_t ksize(const void *object)
-{
- size_t size = __ksize(object);
- /* We assume that ksize callers could use whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_shadow(object, size);
- return size;
-}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
void kfree(const void *x)
{