author		Wei Yang <richard.weiyang@gmail.com>		2017-07-06 15:36:31 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 16:24:30 -0700
commit		a93cf07bc3fb4e7bc924d33c387dabc85086ea38 (patch)
tree		ed8c028af545abf8743afcff1000758cbd947d28 /mm/slub.c
parent		d3111e6cce6001e71ddc4737d0d412c2300043a2 (diff)
mm/slub.c: wrap cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL
cpu_slab's partial field is only used when CONFIG_SLUB_CPU_PARTIAL is set,
which means we can save a pointer's worth of space per cpu for every slab
cache when that option is disabled.

This patch wraps cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL and wraps its
sysfs use too.

[akpm@linux-foundation.org: avoid strange 80-col tricks]
Link: http://lkml.kernel.org/r/20170502144533.10729-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
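For reference, the accessors used below come from the include/linux/slub_def.h
half of this commit, which the path-limited diffstat below filters out. A sketch
of those helpers, reconstructed from the slub_def.h side of the series (exact
whitespace and comments are approximate):

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

/* Pop: point the per-cpu partial list at the next page in the chain */
#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

/* Tearing-safe read for lockless walkers of remote cpus' partial lists */
#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

With the option disabled, the read helpers collapse to NULL and the setter to a
no-op, so every call site in the mm/slub.c hunks below compiles away without
needing its own #ifdef.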
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7234e0e03bdc..48071c541275 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2303,7 +2303,7 @@ static bool has_cpu_slab(int cpu, void *info)
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-	return c->page || c->partial;
+	return c->page || slub_percpu_partial(c);
 }
 
 static void flush_all(struct kmem_cache *s)
@@ -2565,9 +2565,9 @@ load_freelist:
 
 new_slab:
 
-	if (c->partial) {
-		page = c->page = c->partial;
-		c->partial = page->next;
+	if (slub_percpu_partial(c)) {
+		page = c->page = slub_percpu_partial(c);
+		slub_set_percpu_partial(c, page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}
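Note that slub_set_percpu_partial(c, page) is not a plain store of page: per the
helper sketched above it expands to c->partial = page->next, so this hunk still
pops the first page off the per-cpu partial list, exactly matching the removed
c->partial = page->next line.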
@@ -4754,7 +4754,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
 
-			page = READ_ONCE(c->partial);
+			page = slub_percpu_partial_read_once(c);
 			if (page) {
 				node = page_to_nid(page);
 				if (flags & SO_TOTAL)
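The READ_ONCE() from the removed line survives inside
slub_percpu_partial_read_once(): show_slab_objects() peeks at other cpus'
partial lists without taking any lock, so the helper keeps the tearing-safe
read when CONFIG_SLUB_CPU_PARTIAL is on and degrades to a constant NULL when
it is off.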
@@ -4982,7 +4982,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 	int len;
 
 	for_each_online_cpu(cpu) {
-		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+		struct page *page;
+
+		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
 		if (page) {
 			pages += page->pages;
@@ -4994,7 +4996,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
-		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+		struct page *page;
+
+		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
 		if (page && len < PAGE_SIZE - 20)
 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,