author	Minchan Kim <minchan@kernel.org>	2016-07-26 15:23:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 16:19:19 -0700
commit	4aa409cab7c39c90f4b725ff22f52bbf5d2fc4e0 (patch)
tree	5c88e077ed6a29ef192d3e861bf1f99b5ea3b111
parent	3783689a1aa82ef27a6418b043dd7a077b8330c5 (diff)
zsmalloc: separate free_zspage from putback_zspage
Currently, putback_zspage frees the zspage under class->lock if its
fullness becomes ZS_EMPTY, but that gets in the way of the locking
scheme needed for the new zspage migration.  So this patch separates
free_zspage from putback_zspage and frees the zspage outside
class->lock, as preparation for zspage migration.

Link: http://lkml.kernel.org/r/1464736881-24886-10-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/zsmalloc.c	27
1 file changed, 11 insertions(+), 16 deletions(-)
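For readers tracking the locking argument, the pattern this patch moves
toward can be sketched in plain C.  This is a minimal userspace analogue
under stated assumptions, not zsmalloc code; names such as putback_stub
and zspage_stub are hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

struct zspage_stub {
	int inuse;		/* objects still allocated in this page */
};

static pthread_mutex_t class_lock = PTHREAD_MUTEX_INITIALIZER;

/* Classify only; no freeing here (mirrors the reworked putback_zspage). */
static enum fullness_group putback_stub(struct zspage_stub *page)
{
	return page->inuse == 0 ? ZS_EMPTY : ZS_ALMOST_FULL;
}

int main(void)
{
	struct zspage_stub *page = calloc(1, sizeof(*page));
	enum fullness_group fullness;

	if (!page)
		return 1;

	pthread_mutex_lock(&class_lock);
	fullness = putback_stub(page);
	pthread_mutex_unlock(&class_lock);

	/*
	 * The free is now a separate, caller-controlled step, so it can
	 * run after the class lock has been dropped.
	 */
	if (fullness == ZS_EMPTY) {
		free(page);
		puts("empty page freed outside class lock");
	}
	return 0;
}

The point of the split is that putback only classifies; whether and when
to free becomes a caller decision, so a later patch can move the free
past the unlock without touching putback itself.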
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c6d2cbe0f19f..dd3708611f65 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1687,14 +1687,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
 
 /*
  * putback_zspage - add @zspage into right class's fullness list
- * @pool: target pool
  * @class: destination class
  * @zspage: target page
  *
  * Return @zspage's fullness_group
  */
-static enum fullness_group putback_zspage(struct zs_pool *pool,
-				struct size_class *class,
+static enum fullness_group putback_zspage(struct size_class *class,
 				struct zspage *zspage)
 {
 	enum fullness_group fullness;
@@ -1703,15 +1701,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
 	insert_zspage(class, zspage, fullness);
 	set_zspage_mapping(zspage, class->index, fullness);
 
-	if (fullness == ZS_EMPTY) {
-		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-			class->size, class->pages_per_zspage));
-		atomic_long_sub(class->pages_per_zspage,
-				&pool->pages_allocated);
-
-		free_zspage(pool, zspage);
-	}
-
 	return fullness;
 }
 
@@ -1760,23 +1749,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 			if (!migrate_zspage(pool, class, &cc))
 				break;
 
-			putback_zspage(pool, class, dst_zspage);
+			putback_zspage(class, dst_zspage);
 		}
 
 		/* Stop if we couldn't find slot */
 		if (dst_zspage == NULL)
 			break;
 
-		putback_zspage(pool, class, dst_zspage);
-		if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY)
+		putback_zspage(class, dst_zspage);
+		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+			zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+					class->size, class->pages_per_zspage));
+			atomic_long_sub(class->pages_per_zspage,
+					&pool->pages_allocated);
+			free_zspage(pool, src_zspage);
 			pool->stats.pages_compacted += class->pages_per_zspage;
+		}
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);
 	}
 
 	if (src_zspage)
-		putback_zspage(pool, class, src_zspage);
+		putback_zspage(class, src_zspage);
 
 	spin_unlock(&class->lock);
 }