path: root/mm/slub.c
author    Abel Wu <wuyun.wu@huawei.com>    2020-10-13 16:48:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-10-13 18:38:27 -0700
commit    9f986d998a3001b6eeb189be8444bc0360e61e24 (patch)
tree      51a1a364958d941d87d2e2c2d4afcd03af2c4d75 /mm/slub.c
parent    c270cf3041a5cb7c5853d45a794309e66576493a (diff)
mm/slub: fix missing ALLOC_SLOWPATH stat when bulk alloc
The ALLOC_SLOWPATH statistic is currently not recorded for bulk allocations. Fix it by accounting the statistic in the allocation slow path itself.

Signed-off-by: Abel Wu <wuyun.wu@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hewenliang <hewenliang4@huawei.com>
Cc: Hu Shiyuan <hushiyuan@huawei.com>
Link: http://lkml.kernel.org/r/20200811022427.1363-1-wuyun.wu@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index da6438bd8202..7728a0b71d63 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2661,6 +2661,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	void *freelist;
 	struct page *page;
 
+	stat(s, ALLOC_SLOWPATH);
+
 	page = c->page;
 	if (!page) {
 		/*
@@ -2850,7 +2852,6 @@ redo:
 	page = c->page;
 	if (unlikely(!object || !node_match(page, node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
-		stat(s, ALLOC_SLOWPATH);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
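
For context, here is a minimal user-space sketch (not kernel code) of why the bulk path missed the counter: kmem_cache_alloc_bulk() refills objects by calling ___slab_alloc() directly, while the old stat(s, ALLOC_SLOWPATH) call sat only in the single-object path (in slab_alloc_node(), right after __slab_alloc() returned), so bulk slow-path allocations were never counted. Moving the stat() into ___slab_alloc() covers both callers. Only the shape of the call paths mirrors mm/slub.c; the function bodies below are simplified stand-ins for illustration.

/*
 * Minimal user-space sketch, NOT kernel code.  Function names echo
 * mm/slub.c, but the bodies are assumed/simplified stand-ins.
 */
#include <stdio.h>

static unsigned long alloc_slowpath_stat;  /* stands in for stat(s, ALLOC_SLOWPATH) */

static void *___slab_alloc_sketch(void)
{
	/* After the patch, the counter is bumped here, so every caller of
	 * the slow path is counted, including the bulk path. */
	alloc_slowpath_stat++;
	return (void *)1;  /* pretend we refilled the freelist and got an object */
}

static void *__slab_alloc_sketch(void)
{
	/* Thin wrapper, as in slub.c where __slab_alloc() disables
	 * interrupts and then calls ___slab_alloc(). */
	return ___slab_alloc_sketch();
}

static void *slab_alloc_node_sketch(int freelist_empty)
{
	if (freelist_empty) {
		/* Before the patch, the stat was bumped only in this
		 * single-object path, right after this call returned. */
		return __slab_alloc_sketch();
	}
	return (void *)1;  /* fast path: object taken from the per-cpu freelist */
}

static int kmem_cache_alloc_bulk_sketch(int nr, int freelist_empty)
{
	/* The bulk path calls ___slab_alloc() directly whenever the per-cpu
	 * freelist runs dry, bypassing slab_alloc_node() and therefore
	 * (before the patch) the ALLOC_SLOWPATH accounting. */
	for (int i = 0; i < nr; i++) {
		if (freelist_empty)
			___slab_alloc_sketch();
	}
	return nr;
}

int main(void)
{
	slab_alloc_node_sketch(1);          /* single alloc hitting the slow path */
	kmem_cache_alloc_bulk_sketch(3, 1); /* bulk alloc hitting the slow path   */
	printf("ALLOC_SLOWPATH = %lu\n", alloc_slowpath_stat);
	return 0;
}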