Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	231
1 file changed, 8 insertions(+), 223 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8d431193c273..238293b1dbe1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -21,6 +21,7 @@
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
+#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -71,10 +72,8 @@ static int __init setup_slab_merge(char *str)
return 1;
}
-#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
-#endif
__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);
@@ -197,10 +196,6 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
if (s->size - size >= sizeof(void *))
continue;
- if (IS_ENABLED(CONFIG_SLAB) && align &&
- (align > s->align || s->align % align))
- continue;
-
return s;
}
return NULL;
@@ -670,7 +665,7 @@ EXPORT_SYMBOL(random_kmalloc_seed);
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
-static u8 size_index[24] __ro_after_init = {
+u8 kmalloc_size_index[24] __ro_after_init = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
@@ -697,33 +692,6 @@ static u8 size_index[24] __ro_after_init = {
2 /* 192 */
};
-static inline unsigned int size_index_elem(unsigned int bytes)
-{
- return (bytes - 1) / 8;
-}
-
-/*
- * Find the kmem_cache structure that serves a given size of
- * allocation
- */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
-{
- unsigned int index;
-
- if (size <= 192) {
- if (!size)
- return ZERO_SIZE_PTR;
-
- index = size_index[size_index_elem(size)];
- } else {
- if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
- return NULL;
- index = fls(size - 1);
- }
-
- return kmalloc_caches[kmalloc_type(flags, caller)][index];
-}
-
size_t kmalloc_size_roundup(size_t size)
{
if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
@@ -848,9 +816,9 @@ void __init setup_kmalloc_cache_index_table(void)
for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
unsigned int elem = size_index_elem(i);
- if (elem >= ARRAY_SIZE(size_index))
+ if (elem >= ARRAY_SIZE(kmalloc_size_index))
break;
- size_index[elem] = KMALLOC_SHIFT_LOW;
+ kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
}
if (KMALLOC_MIN_SIZE >= 64) {
@@ -859,7 +827,7 @@ void __init setup_kmalloc_cache_index_table(void)
* is 64 byte.
*/
for (i = 64 + 8; i <= 96; i += 8)
- size_index[size_index_elem(i)] = 7;
+ kmalloc_size_index[size_index_elem(i)] = 7;
}
@@ -870,7 +838,7 @@ void __init setup_kmalloc_cache_index_table(void)
* instead.
*/
for (i = 128 + 8; i <= 192; i += 8)
- size_index[size_index_elem(i)] = 8;
+ kmalloc_size_index[size_index_elem(i)] = 8;
}
}
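To make the two fixups above concrete: on a configuration where KMALLOC_MIN_SIZE is 64, the 96-byte cache is never created, so the first loop points the 72..96 byte slots at index 7 and kmalloc(72) is served from kmalloc-128; likewise, with a 128-byte minimum the second loop points the 136..192 byte slots at index 8, i.e. kmalloc-256, because kmalloc-192 does not exist there.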
@@ -968,95 +936,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
slab_state = UP;
}
-void free_large_kmalloc(struct folio *folio, void *object)
-{
- unsigned int order = folio_order(folio);
-
- if (WARN_ON_ONCE(order == 0))
- pr_warn_once("object pointer: 0x%p\n", object);
-
- kmemleak_free(object);
- kasan_kfree_large(object);
- kmsan_kfree_large(object);
-
- mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(folio_page(folio, 0), order);
-}
-
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
-static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
-{
- struct kmem_cache *s;
- void *ret;
-
- if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
- ret = __kmalloc_large_node(size, flags, node);
- trace_kmalloc(caller, ret, size,
- PAGE_SIZE << get_order(size), flags, node);
- return ret;
- }
-
- s = kmalloc_slab(size, flags, caller);
-
- if (unlikely(ZERO_OR_NULL_PTR(s)))
- return s;
-
- ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
- ret = kasan_kmalloc(s, ret, size, flags);
- trace_kmalloc(caller, ret, size, s->size, flags, node);
- return ret;
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __do_kmalloc_node(size, flags, node, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-
-void *__kmalloc(size_t size, gfp_t flags)
-{
- return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
- int node, unsigned long caller)
-{
- return __do_kmalloc_node(size, flags, node, caller);
-}
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
-
-/**
- * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
- *
- * If @object is NULL, no operation is performed.
- */
-void kfree(const void *object)
-{
- struct folio *folio;
- struct slab *slab;
- struct kmem_cache *s;
-
- trace_kfree(_RET_IP_, object);
-
- if (unlikely(ZERO_OR_NULL_PTR(object)))
- return;
-
- folio = virt_to_folio(object);
- if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, (void *)object);
- return;
- }
-
- slab = folio_slab(folio);
- s = slab->slab_cache;
- __kmem_cache_free(s, (void *)object, _RET_IP_);
-}
-EXPORT_SYMBOL(kfree);
-
/**
* __ksize -- Report full size of underlying allocation
* @object: pointer to the object
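In the removed code, requests above KMALLOC_MAX_CACHE_SIZE bypass the slab caches entirely: __do_kmalloc_node() hands them to __kmalloc_large_node(), and both the tracepoint and free_large_kmalloc() account PAGE_SIZE << order rather than the requested size. A small userspace sketch of that order/footprint arithmetic, using hypothetical demo_* names and assuming 4 KiB pages:

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static unsigned int demo_get_order(size_t size)
{
	unsigned int order = 0;

	size = (size - 1) >> DEMO_PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	size_t size = 5 * DEMO_PAGE_SIZE;		/* a 20 KiB request */
	unsigned int order = demo_get_order(size);

	/* order 3: the allocation really occupies 8 pages (32 KiB) */
	printf("order=%u footprint=%lu bytes\n",
	       order, (unsigned long)(DEMO_PAGE_SIZE << order));
	return 0;
}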
@@ -1093,30 +972,6 @@ size_t __ksize(const void *object)
return slab_ksize(folio_slab(folio)->slab_cache);
}
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
- size, _RET_IP_);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_trace);
-
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_node_trace);
-
gfp_t kmalloc_fix_flags(gfp_t flags)
{
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1129,57 +984,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
return flags;
}
-/*
- * To avoid unnecessary overhead, we pass through large allocation requests
- * directly to the page allocator. We use __GFP_COMP, because we will need to
- * know the allocation order to free the pages properly in kfree.
- */
-
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
-{
- struct page *page;
- void *ptr = NULL;
- unsigned int order = get_order(size);
-
- if (unlikely(flags & GFP_SLAB_BUG_MASK))
- flags = kmalloc_fix_flags(flags);
-
- flags |= __GFP_COMP;
- page = alloc_pages_node(node, flags, order);
- if (page) {
- ptr = page_address(page);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- PAGE_SIZE << order);
- }
-
- ptr = kasan_kmalloc_large(ptr, size, flags);
- /* As ptr might get tagged, call kmemleak hook after KASAN. */
- kmemleak_alloc(ptr, size, 1, flags);
- kmsan_kmalloc_large(ptr, size, flags);
-
- return ptr;
-}
-
-void *kmalloc_large(size_t size, gfp_t flags)
-{
- void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
-
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
- flags, NUMA_NO_NODE);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
-
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
-{
- void *ret = __kmalloc_large_node(size, flags, node);
-
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
- flags, node);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large_node);
-
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
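The freelist_randomize() helper whose signature begins above (kept, not removed) shuffles the order in which a new slab's objects are handed out when CONFIG_SLAB_FREELIST_RANDOM is enabled. A rough userspace sketch of such a shuffle, with hypothetical demo_* names and rand() standing in for the kernel's random source:

#include <stdio.h>
#include <stdlib.h>

/* Fill list with 0..count-1, then Fisher-Yates shuffle it. */
static void demo_freelist_randomize(unsigned int *list, unsigned int count)
{
	unsigned int rand_idx, tmp, i;

	for (i = 0; i < count; i++)
		list[i] = i;

	if (count < 2)
		return;

	/* walk backwards, swapping each slot with a randomly chosen earlier one */
	for (i = count - 1; i > 0; i--) {
		rand_idx = (unsigned int)rand() % (i + 1);
		tmp = list[i];
		list[i] = list[rand_idx];
		list[rand_idx] = tmp;
	}
}

int main(void)
{
	unsigned int order[8], i;

	demo_freelist_randomize(order, 8);
	for (i = 0; i < 8; i++)
		printf("%u ", order[i]);
	printf("\n");
	return 0;
}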
@@ -1222,12 +1026,8 @@ void cache_random_seq_destroy(struct kmem_cache *cachep)
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
-#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
-#ifdef CONFIG_SLAB
-#define SLABINFO_RIGHTS (0600)
-#else
+#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)
-#endif
static void print_slabinfo_header(struct seq_file *m)
{
@@ -1235,18 +1035,10 @@ static void print_slabinfo_header(struct seq_file *m)
* Output format version, so at least we can change it
* without _too_ many complaints.
*/
-#ifdef CONFIG_DEBUG_SLAB
- seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
-#else
seq_puts(m, "slabinfo - version: 2.1\n");
-#endif
seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
-#ifdef CONFIG_DEBUG_SLAB
- seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
- seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
-#endif
seq_putc(m, '\n');
}
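With CONFIG_SLAB gone, /proc/slabinfo always reports format 2.1 without the globalstat/cpustat columns that CONFIG_DEBUG_SLAB used to append, and (per the SLABINFO_RIGHTS hunk earlier) the file is now 0400 only; the 0600 mode existed so that SLAB's per-cache tunables could be written, an interface that only SLAB implemented.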
@@ -1370,7 +1162,7 @@ static int __init slab_proc_init(void)
}
module_init(slab_proc_init);
-#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
+#endif /* CONFIG_SLUB_DEBUG */
static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
@@ -1488,10 +1280,3 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
-int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
-{
- if (__should_failslab(s, gfpflags))
- return -ENOMEM;
- return 0;
-}
-ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
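should_failslab() is the failslab fault-injection hook; the ALLOW_ERROR_INJECTION annotation is what lets error-injection frameworks (including BPF-based override) force it to return -ENOMEM and thereby make allocations fail on demand. Its disappearance here is presumably part of the fast paths leaving slab_common.c in this series rather than the hook being dropped outright.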