author		Geliang Tang <geliangtang@163.com>	2016-01-14 15:17:56 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-14 16:00:49 -0800
commit		d8ad47d83f95abe2dfece1338633e376fec3bd31 (patch)
tree		8c02c6f9c2f84b8ea819c02bb38afa10e4ae7362 /mm/slab.c
parent		2bd03e49d66775da8cebdcc8d5bec7d68512ae87 (diff)
mm/slab.c: use list_first_entry_or_null()
Simplify the code with list_first_entry_or_null().

Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
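For context, the sketch below demonstrates the list_first_entry_or_null() pattern this patch adopts, as a standalone userspace program. The list macros are simplified stand-ins for the real helpers in include/linux/list.h, and the struct page, the list_add_tail() helper, and the demo values are illustrative assumptions only, not kernel code.

/*
 * Minimal userspace sketch of the list_first_entry_or_null() pattern.
 * The macros approximate the kernel helpers in include/linux/list.h;
 * struct page and the demo values are placeholders for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)	container_of(ptr, type, member)
#define list_empty(head)	((head)->next == (head))
#define list_first_entry(head, type, member) \
	list_entry((head)->next, type, member)
/* Returns the first entry, or NULL when the list is empty. */
#define list_first_entry_or_null(head, type, member) \
	(list_empty(head) ? NULL : list_first_entry(head, type, member))

struct page {
	struct list_head lru;
	int id;
};

/* Simplified tail insertion, mirroring the kernel's list_add_tail(). */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct list_head slabs_partial = LIST_HEAD_INIT(slabs_partial);
	struct page p = { .id = 42 };
	struct page *page;

	/* Empty list: the helper folds the open-coded "entry == head" test into NULL. */
	page = list_first_entry_or_null(&slabs_partial, struct page, lru);
	printf("empty list -> %p\n", (void *)page);

	/* Non-empty list: the first entry comes back already converted to its type. */
	list_add_tail(&p.lru, &slabs_partial);
	page = list_first_entry_or_null(&slabs_partial, struct page, lru);
	printf("first page id -> %d\n", page ? page->id : -1);
	return 0;
}

The benefit in the patch is the same as in this demo: a single call replaces the raw pointer walk plus the explicit empty-list comparison, and the NULL check reads directly.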
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 4765c97ce690..6bb046649450 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2791,18 +2791,18 @@ retry:
 	}
 
 	while (batchcount > 0) {
-		struct list_head *entry;
 		struct page *page;
 		/* Get slab alloc is to come from. */
-		entry = n->slabs_partial.next;
-		if (entry == &n->slabs_partial) {
+		page = list_first_entry_or_null(&n->slabs_partial,
+				struct page, lru);
+		if (!page) {
 			n->free_touched = 1;
-			entry = n->slabs_free.next;
-			if (entry == &n->slabs_free)
+			page = list_first_entry_or_null(&n->slabs_free,
+					struct page, lru);
+			if (!page)
 				goto must_grow;
 		}
 
-		page = list_entry(entry, struct page, lru);
 		check_spinlock_acquired(cachep);
 
 		/*
@@ -3085,7 +3085,6 @@ retry:
 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
-	struct list_head *entry;
 	struct page *page;
 	struct kmem_cache_node *n;
 	void *obj;
@@ -3098,15 +3097,16 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 retry:
 	check_irq_off();
 	spin_lock(&n->list_lock);
-	entry = n->slabs_partial.next;
-	if (entry == &n->slabs_partial) {
+	page = list_first_entry_or_null(&n->slabs_partial,
+			struct page, lru);
+	if (!page) {
 		n->free_touched = 1;
-		entry = n->slabs_free.next;
-		if (entry == &n->slabs_free)
+		page = list_first_entry_or_null(&n->slabs_free,
+				struct page, lru);
+		if (!page)
 			goto must_grow;
 	}
 
-	page = list_entry(entry, struct page, lru);
 	check_spinlock_acquired_node(cachep, nodeid);
 
 	STATS_INC_NODEALLOCS(cachep);