Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	79
1 file changed, 35 insertions(+), 44 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f7117ad9b3a3..9df370558e5d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -371,12 +371,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
- struct page *page = virt_to_head_page(obj);
- return page->slab_cache;
-}
-
static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
unsigned int idx)
{
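
The file-local virt_to_cache() helper is dropped from mm/slab.c. The later hunks (kmem_cache_free_bulk(), kfree(), __ksize()) expect a shared lookup that can return NULL for a pointer that does not resolve to a slab page, which is why those call sites now check the result. A minimal sketch of such a helper, assuming it now lives in a common header such as mm/slab.h (the warning text is illustrative, not taken from this diff):

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	/* Refuse to hand back a cache pointer for a non-slab page. */
	if (WARN_ONCE(!PageSlab(page), "%s: object is not a slab page\n",
		      __func__))
		return NULL;
	return page->slab_cache;
}
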
@@ -1245,7 +1239,7 @@ void __init kmem_cache_init(void)
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN, 0, 0);
list_add(&kmem_cache->list, &slab_caches);
- memcg_link_cache(kmem_cache);
+ memcg_link_cache(kmem_cache, NULL);
slab_state = PARTIAL;
/*
@@ -1366,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct page *page;
- int nr_pages;
flags |= cachep->allocflags;
@@ -1376,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
return NULL;
}
- if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+ if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
__free_pages(page, cachep->gfporder);
return NULL;
}
- nr_pages = (1 << cachep->gfporder);
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
- else
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
__SetPageSlab(page);
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1401,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
int order = cachep->gfporder;
- unsigned long nr_freed = (1 << order);
-
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
- else
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
BUG_ON(!PageSlab(page));
__ClearPageSlabPfmemalloc(page);
@@ -1415,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
page->mapping = NULL;
if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab += nr_freed;
- memcg_uncharge_slab(page, order, cachep);
+ current->reclaim_state->reclaimed_slab += 1 << order;
+ uncharge_slab_page(page, order, cachep);
__free_pages(page, order);
}
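
The two hunks above collapse the open-coded memcg charge and the NR_SLAB_RECLAIMABLE/NR_SLAB_UNRECLAIMABLE vmstat updates into single charge_slab_page()/uncharge_slab_page() calls. A plausible shape for those wrappers, reconstructed from the code removed here and assumed to live in mm/slab.h (not necessarily the exact upstream definitions):

static __always_inline int charge_slab_page(struct page *page, gfp_t gfp,
					    int order, struct kmem_cache *s)
{
	enum node_stat_item idx = (s->flags & SLAB_RECLAIM_ACCOUNT) ?
				  NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
	int ret = memcg_charge_slab(page, gfp, order, s);

	if (ret)
		return ret;

	/* Account all pages of the slab against the matching vmstat counter. */
	mod_lruvec_page_state(page, idx, 1 << order);
	return 0;
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	enum node_stat_item idx = (s->flags & SLAB_RECLAIM_ACCOUNT) ?
				  NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;

	mod_lruvec_page_state(page, idx, -(1 << order));
	memcg_uncharge_slab(page, order, s);
}
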
@@ -1830,6 +1811,14 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
cachep->num = 0;
+ /*
+ * If slab auto-initialization on free is enabled, store the freelist
+ * off-slab, so that its contents don't end up in one of the allocated
+ * objects.
+ */
+ if (unlikely(slab_want_init_on_free(cachep)))
+ return false;
+
if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
return false;
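
This early bail-out forces the freelist off-slab whenever free-time initialization is active, so that wiping a freed object cannot clobber a freelist stored inside it. slab_want_init_on_free() is assumed to be a small static-branch-gated predicate in mm/slab.h, roughly:

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	/* Skip caches with constructors, RCU-delayed freeing or poisoning. */
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
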
@@ -2258,6 +2247,10 @@ void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
__kmem_cache_shrink(cachep);
}
+
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
+{
+}
#endif
int __kmem_cache_shutdown(struct kmem_cache *cachep)
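
The new __kmemcg_cache_deactivate_after_rcu() hook is an intentionally empty stub for SLAB: __kmemcg_cache_deactivate() already runs __kmem_cache_shrink(), and there is presumably nothing further for SLAB to clean up once the RCU grace period implied by the hook's name has elapsed.
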
@@ -3263,7 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
- if (unlikely(flags & __GFP_ZERO) && ptr)
+ if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
memset(ptr, 0, cachep->object_size);
slab_post_alloc_hook(cachep, flags, 1, &ptr);
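
Here, and in the slab_alloc() and kmem_cache_alloc_bulk() hunks below, slab_want_init_on_alloc() replaces the plain __GFP_ZERO test, so the memset now covers both an explicit __GFP_ZERO request and globally enabled allocation-time initialization. A rough sketch of the predicate, under the same mm/slab.h assumption as above:

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}
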
@@ -3320,7 +3313,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
- if (unlikely(flags & __GFP_ZERO) && objp)
+ if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
memset(objp, 0, cachep->object_size);
slab_post_alloc_hook(cachep, flags, 1, &objp);
@@ -3441,6 +3434,8 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+ if (unlikely(slab_want_init_on_free(cachep)))
+ memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
@@ -3528,7 +3523,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
/* Clear memory outside IRQ disabled section */
- if (unlikely(flags & __GFP_ZERO))
+ if (unlikely(slab_want_init_on_alloc(flags, s)))
for (i = 0; i < size; i++)
memset(p[i], 0, s->object_size);
@@ -3715,6 +3710,8 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
s = virt_to_cache(objp);
else
s = cache_from_obj(orig_s, objp);
+ if (!s)
+ continue;
debug_check_no_locks_freed(objp, s->object_size);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
@@ -3749,6 +3746,10 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
+ if (!c) {
+ local_irq_restore(flags);
+ return;
+ }
debug_check_no_locks_freed(objp, c->object_size);
debug_check_no_obj_freed(objp, c->object_size);
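
Both the bulk-free path and kfree() above now tolerate a NULL cache lookup: rather than dereferencing a bogus kmem_cache pointer for a corrupted or non-slab address, the object is skipped (or the free becomes a no-op), presumably after the lookup helper has already warned, as in the virt_to_cache() sketch near the top.
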
@@ -4204,33 +4205,23 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
#endif /* CONFIG_HARDENED_USERCOPY */
/**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
- *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
+ * __ksize -- Uninstrumented ksize.
*
- * Return: size of the actual memory used by @objp in bytes
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
*/
-size_t ksize(const void *objp)
+size_t __ksize(const void *objp)
{
+ struct kmem_cache *c;
size_t size;
BUG_ON(!objp);
if (unlikely(objp == ZERO_SIZE_PTR))
return 0;
- size = virt_to_cache(objp)->object_size;
- /* We assume that ksize callers could use the whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_shadow(objp, size);
+ c = virt_to_cache(objp);
+ size = c ? c->object_size : 0;
return size;
}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
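
With the instrumented body removed from mm/slab.c, ksize() itself is presumably defined once in common slab code as a KASAN-aware wrapper around __ksize(). A minimal sketch that reuses the unpoisoning step deleted above (the exact upstream wrapper may differ):

size_t ksize(const void *objp)
{
	size_t size = __ksize(objp);

	/*
	 * Callers of ksize() may legitimately use the whole allocated area,
	 * so the KASAN shadow covering it is unpoisoned here.
	 */
	kasan_unpoison_shadow(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);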