Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h | 144
1 file changed, 50 insertions(+), 94 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a713b0e5bbcd..6dc495f76644 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -189,7 +189,6 @@ int kmem_cache_shrink(struct kmem_cache *s);
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
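
The hunk above drops __ksize() from the public header; ksize() remains the
supported way to query the usable size of an allocation. A minimal sketch of
the surviving interface (demo_ksize() is a hypothetical caller):

	/* ksize() reports the usable size, which bucket rounding may make
	 * larger than the requested size. */
	static int demo_ksize(void)
	{
		char *buf = kmalloc(100, GFP_KERNEL); /* 128-byte bucket */

		if (!buf)
			return -ENOMEM;
		pr_info("requested 100, usable %zu\n", ksize(buf));
		kfree(buf);
		return 0;
	}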
@@ -245,27 +244,17 @@ static inline unsigned int arch_slab_minalign(void)
#ifdef CONFIG_SLAB
/*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
+ * SLAB and SLUB directly allocate requests fitting into an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif
#ifdef CONFIG_SLUB
-/*
- * SLUB directly allocates requests fitting in to an order-1 page
- * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
- */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
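
With both allocators now capped at an order-1 page, the derived constants on a
4 KiB-page system work out as below (a sketch, assuming PAGE_SHIFT == 12 and
the KMALLOC_MAX_CACHE_SIZE definition used elsewhere in this header):

	/* PAGE_SHIFT == 12 (4 KiB pages): */
	/* KMALLOC_SHIFT_HIGH     == 13, largest kmalloc cache = 1 << 13 = 8 KiB */
	/* KMALLOC_SHIFT_MAX      == MAX_ORDER + 11, absolute kmalloc() ceiling  */
	/* KMALLOC_MAX_CACHE_SIZE == 8192; larger requests go to the page        */
	/* allocator instead of a kmalloc cache.                                 */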
@@ -417,10 +406,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
@@ -430,6 +415,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
+static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
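
The new static_assert() pins down why the 4 MB..32 MB branches could go: with
KMALLOC_SHIFT_HIGH == PAGE_SHIFT + 1, the largest index the table must produce
is PAGE_SHIFT + 1, and the table now tops out at 21 (2 MB). A sketch of the
arithmetic behind the assertion:

	/* PAGE_SHIFT == 12  ->  largest cache 1 << 13, needs index 13: OK     */
	/* PAGE_SHIFT == 20  ->  largest cache 1 << 21, needs index 21: OK     */
	/* PAGE_SHIFT == 21  ->  would need index 22, removed above: build bug */
	static_assert(PAGE_SHIFT <= 20);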
@@ -458,42 +444,22 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
__alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
-#else
-static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
- return kmem_cache_alloc(s, flags);
-}
-#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
- __assume_slab_alignment __alloc_size(3);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment
- __alloc_size(4);
-#else
-static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags, int node, size_t size)
-{
- return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_kmalloc_alignment __alloc_size(3);
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_kmalloc_alignment
+ __alloc_size(4);
#else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
+/* Save a function call when CONFIG_TRACING=n */
+static __always_inline __alloc_size(3)
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
{
void *ret = kmem_cache_alloc(s, flags);
@@ -501,8 +467,9 @@ static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_
return ret;
}
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
+static __always_inline __alloc_size(4)
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
@@ -511,25 +478,11 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
}
#endif /* CONFIG_TRACING */
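
Either way the caller-visible signature is the same, so a constant-size
kmalloc() always lowers to one kmalloc_trace() call. A sketch of the folded
fast path (simplified from the kmalloc() definition later in this header;
example_kmalloc_32() is hypothetical):

	static __always_inline void *example_kmalloc_32(gfp_t flags)
	{
		/* kmalloc(32, flags): constant size, kmalloc_index(32) == 5 */
		return kmalloc_trace(kmalloc_caches[kmalloc_type(flags)][5],
				     flags, 32);
	}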
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
- __alloc_size(1);
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
- __assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
- unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+ __alloc_size(1);
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+ __alloc_size(1);
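
kmalloc_order() and its tracing twin are gone; kmalloc_large() and
kmalloc_large_node() take the size directly and compute the page order
internally. A usage sketch (demo_large_alloc() is hypothetical; sizes assume
4 KiB pages):

	static void *demo_large_alloc(void)
	{
		/* 64 KiB > KMALLOC_MAX_CACHE_SIZE (8 KiB), so kmalloc()
		 * routes this through the page allocator; the result is
		 * page-aligned and still freed with plain kfree(). */
		return kmalloc(64 * 1024, GFP_KERNEL);
	}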
/**
* kmalloc - allocate memory
@@ -599,7 +552,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_trace(
+ return kmalloc_trace(
kmalloc_caches[kmalloc_type(flags)][index],
flags, size);
#endif
@@ -607,23 +560,35 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
+#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
-#ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE) {
- unsigned int i = kmalloc_index(size);
+ if (__builtin_constant_p(size)) {
+ unsigned int index;
+
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ index = kmalloc_index(size);
- if (!i)
+ if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][i],
- flags, node, size);
+ return kmalloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags)][index],
+ flags, node, size);
}
-#endif
return __kmalloc_node(size, flags, node);
}
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ return __kmalloc_node(size, flags, node);
+}
+#endif
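
The rewrite means a compile-time-constant size is now fully dispatched at
compile time on the node path too, large sizes included, rather than only
under CONFIG_NUMA. A sketch of what the compiler folds (demo_node_alloc() is
hypothetical):

	static void *demo_node_alloc(int nid)
	{
		/* constant 512 -> kmalloc_node_trace(), cache index 9 */
		void *small = kmalloc_node(512, GFP_KERNEL, nid);
		/* constant 64 KiB -> kmalloc_large_node(), page allocator */
		void *large = kmalloc_node(64 * 1024, GFP_KERNEL, nid);

		kfree(large);
		return small;
	}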
/**
* kmalloc_array - allocate memory for an array.
@@ -673,6 +638,12 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ _RET_IP_)
+
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
@@ -681,9 +652,9 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
- __kmalloc_track_caller(size, flags, _RET_IP_)
+ __kmalloc_node_track_caller(size, flags, \
+ NUMA_NO_NODE, _RET_IP_)
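
kmalloc_track_caller() is now a thin alias for the node-aware variant with
NUMA_NO_NODE, so both record _RET_IP_ through one implementation. A sketch of
the intended use in a wrapper allocator (demo_wrapper() is hypothetical):

	static void *demo_wrapper(size_t len)
	{
		/* _RET_IP_ here is demo_wrapper()'s return address, so slab
		 * leak tracking blames the real caller, not this helper. */
		return kmalloc_track_caller(len, GFP_KERNEL);
	}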
static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
int node)
@@ -702,21 +673,6 @@ static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
- unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node_track_caller(size, flags, node, \
- _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
- kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
/*
* Shortcuts
*/