path: root/mm/slab.c
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  54
1 file changed, 10 insertions(+), 44 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..284ab737faee 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1467,53 +1467,17 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
- unsigned long caller)
-{
- int size = cachep->object_size;
-
- addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
-
- if (size < 5 * sizeof(unsigned long))
- return;
-
- *addr++ = 0x12345678;
- *addr++ = caller;
- *addr++ = smp_processor_id();
- size -= 3 * sizeof(unsigned long);
- {
- unsigned long *sptr = &caller;
- unsigned long svalue;
-
- while (!kstack_end(sptr)) {
- svalue = *sptr++;
- if (kernel_text_address(svalue)) {
- *addr++ = svalue;
- size -= sizeof(unsigned long);
- if (size <= sizeof(unsigned long))
- break;
- }
- }
-
- }
- *addr++ = 0x87654321;
-}
-
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
- int map, unsigned long caller)
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
if (!is_debug_pagealloc_cache(cachep))
return;
- if (caller)
- store_stackinfo(cachep, objp, caller);
-
kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}
#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
- int map, unsigned long caller) {}
+ int map) {}
#endif
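Note: the hunk above removes store_stackinfo() and the now-unused caller argument from slab_kernel_map(). Under CONFIG_DEBUG_PAGEALLOC the allocator no longer writes a call-stack record into the freed object itself; it only maps or unmaps the object's backing pages. The remaining slab_kernel_map() call sites in the hunks below change mechanically to the three-argument form. A minimal sketch of the resulting debug free path, assuming SLAB_POISON is set (the helper name debug_free_obj() is invented for illustration):

static void debug_free_obj(struct kmem_cache *cachep, void *objp)
{
	/* Fill the object with POISON_FREE bytes so stale reads are visible. */
	poison_obj(cachep, objp, POISON_FREE);
	/* map == 0: unmap the backing pages, so any later access to the freed
	 * object faults immediately instead of silently reading poison. */
	slab_kernel_map(cachep, objp, 0);
}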
@@ -1661,7 +1625,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
if (cachep->flags & SLAB_POISON) {
check_poison_obj(cachep, objp);
- slab_kernel_map(cachep, objp, 1, 0);
+ slab_kernel_map(cachep, objp, 1);
}
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2115,6 +2079,8 @@ done:
cachep->allocflags = __GFP_COMP;
if (flags & SLAB_CACHE_DMA)
cachep->allocflags |= GFP_DMA;
+ if (flags & SLAB_CACHE_DMA32)
+ cachep->allocflags |= GFP_DMA32;
if (flags & SLAB_RECLAIM_ACCOUNT)
cachep->allocflags |= __GFP_RECLAIMABLE;
cachep->size = size;
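Note: this hunk adds ZONE_DMA32 support alongside the existing SLAB_CACHE_DMA handling: a cache created with SLAB_CACHE_DMA32 gets GFP_DMA32 folded into its page-allocation flags, so its slabs are allocated from memory below 4 GiB. A hedged usage sketch (the cache name and object size are illustrative, not taken from this diff):

/* A cache whose objects must be addressable by hardware limited to
 * 32-bit addresses, e.g. an IOMMU whose table pointers are 32 bits
 * wide. Name and size are made up for the example. */
struct kmem_cache *l2_table_cache;

l2_table_cache = kmem_cache_create("example-dma32", 1024, 1024,
				   SLAB_CACHE_DMA32, NULL);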
@@ -2372,7 +2338,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- freelist = kasan_reset_tag(freelist);
if (!freelist)
return NULL;
} else {
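Note: here the off-slab freelist pointer returned by kmem_cache_alloc_node() is no longer passed through kasan_reset_tag() before the NULL check. For context, under software tag-based KASAN that call returns the pointer with its top-byte tag restored to the default match-all value; roughly, as a conceptual sketch for arm64 rather than the real KASAN implementation:

/* Conceptual only: clear a software KASAN tag by forcing the pointer's
 * top byte back to the default 0xff "match-all" kernel tag. */
static inline void *reset_tag_sketch(const void *addr)
{
	return (void *)((unsigned long)addr | (0xffUL << 56));
}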
@@ -2432,7 +2397,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON) {
poison_obj(cachep, objp, POISON_FREE);
- slab_kernel_map(cachep, objp, 0, 0);
+ slab_kernel_map(cachep, objp, 0);
}
}
#endif
@@ -2811,7 +2776,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
if (cachep->flags & SLAB_POISON) {
poison_obj(cachep, objp, POISON_FREE);
- slab_kernel_map(cachep, objp, 0, caller);
+ slab_kernel_map(cachep, objp, 0);
}
return objp;
}
@@ -3075,7 +3040,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
return objp;
if (cachep->flags & SLAB_POISON) {
check_poison_obj(cachep, objp);
- slab_kernel_map(cachep, objp, 1, 0);
+ slab_kernel_map(cachep, objp, 1);
poison_obj(cachep, objp, POISON_INUSE);
}
if (cachep->flags & SLAB_STORE_USER)
@@ -4306,7 +4271,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+ root_caches_node);
struct page *page;
struct kmem_cache_node *n;
const char *name;
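Note: the leaks_show() fix above matters because list_entry() is container_of(): it subtracts offsetof(type, member) from the list pointer. When the iterator walks the root-caches list but names the wrong member (list instead of root_caches_node), every derived struct kmem_cache pointer is skewed by the distance between the two members, and the subsequent field reads are garbage. A toy illustration of the arithmetic (structure and offsets invented for the example):

#include <stddef.h>

struct toy {
	int a;	/* offsetof(struct toy, a) == 0 */
	int b;	/* offsetof(struct toy, b) == 4 */
};

#define container_of_toy(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Given a pointer 'p' that really points at some_toy.b, naming the
 * wrong member recovers a struct address 4 bytes off:
 *   container_of_toy(p, struct toy, b) -> &some_toy          (correct)
 *   container_of_toy(p, struct toy, a) -> 4 bytes past it    (skewed) */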