Diffstat (limited to 'mm/zsmalloc.c')
 mm/zsmalloc.c | 36 +++++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 999b513c7fdf..2c5e56a65354 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -244,6 +244,7 @@ static inline void free_zpdesc(struct zpdesc *zpdesc)
 {
 	struct page *page = zpdesc_page(zpdesc);
 
+	/* PageZsmalloc is sticky until the page is freed to the buddy. */
 	__free_page(page);
 }
 
@@ -876,11 +877,10 @@ static void reset_zpdesc(struct zpdesc *zpdesc)
 {
 	struct page *page = zpdesc_page(zpdesc);
 
-	__ClearPageMovable(page);
 	ClearPagePrivate(page);
 	zpdesc->zspage = NULL;
 	zpdesc->next = NULL;
-	__ClearPageZsmalloc(page);
+	/* PageZsmalloc is sticky until the page is freed to the buddy. */
 }
 
 static int trylock_zspage(struct zspage *zspage)
@@ -1043,6 +1043,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	if (!zspage)
 		return NULL;
 
+	if (!IS_ENABLED(CONFIG_COMPACTION))
+		gfp &= ~__GFP_MOVABLE;
+
 	zspage->magic = ZSPAGE_MAGIC;
 	zspage->pool = pool;
 	zspage->class = class->index;
@@ -1055,7 +1058,6 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 		if (!zpdesc) {
 			while (--i >= 0) {
 				zpdesc_dec_zone_page_state(zpdescs[i]);
-				__zpdesc_clear_zsmalloc(zpdescs[i]);
 				free_zpdesc(zpdescs[i]);
 			}
 			cache_free_zspage(pool, zspage);
@@ -1686,8 +1688,6 @@ static void lock_zspage(struct zspage *zspage)
 
 #ifdef CONFIG_COMPACTION
 
-static const struct movable_operations zsmalloc_mops;
-
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 				struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
 {
@@ -1710,18 +1710,17 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 	set_first_obj_offset(newzpdesc, first_obj_offset);
 	if (unlikely(ZsHugePage(zspage)))
 		newzpdesc->handle = oldzpdesc->handle;
-	__zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
+	__zpdesc_set_movable(newzpdesc);
 }
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
 	/*
-	 * Page is locked so zspage couldn't be destroyed. For detail, look at
-	 * lock_zspage in free_zspage.
+	 * Page is locked so zspage can't be destroyed concurrently
+	 * (see free_zspage()). But if the page was already destroyed
+	 * (see reset_zpdesc()), refuse isolation here.
 	 */
-	VM_BUG_ON_PAGE(PageIsolated(page), page);
-
-	return true;
+	return page_zpdesc(page)->zspage;
 }
 
 static int zs_page_migrate(struct page *newpage, struct page *page,
@@ -1739,7 +1738,15 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	unsigned long old_obj, new_obj;
 	unsigned int obj_idx;
 
-	VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
+	/*
+	 * TODO: nothing prevents a zspage from getting destroyed while
+	 * it is isolated for migration, as the page lock is temporarily
+	 * dropped after zs_page_isolate() succeeded: we should rework that
+	 * and defer destroying such pages once they are un-isolated (putback)
+	 * instead.
+	 */
+	if (!zpdesc->zspage)
+		return MIGRATEPAGE_SUCCESS;
 
 	/* The page is locked, so this pointer must remain valid */
 	zspage = get_zspage(zpdesc);
@@ -1811,10 +1818,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 static void zs_page_putback(struct page *page)
 {
-	VM_BUG_ON_PAGE(!PageIsolated(page), page);
 }
 
-static const struct movable_operations zsmalloc_mops = {
+const struct movable_operations zsmalloc_mops = {
 	.isolate_page = zs_page_isolate,
 	.migrate_page = zs_page_migrate,
 	.putback_page = zs_page_putback,
@@ -1877,7 +1883,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
 
 	do {
 		WARN_ON(!zpdesc_trylock(zpdesc));
-		__zpdesc_set_movable(zpdesc, &zsmalloc_mops);
+		__zpdesc_set_movable(zpdesc);
 		zpdesc_unlock(zpdesc);
 	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
 }
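
In prose, the patch's new contract is: zsmalloc_mops becomes a non-static, globally visible singleton; the PageZsmalloc page type stays set ("sticky") until the page returns to the buddy allocator; and a zpdesc whose zspage backpointer is NULL marks a page that reset_zpdesc() already tore down, so zs_page_isolate() refuses to isolate it and zs_page_migrate() returns early. The following is a minimal userspace C sketch of that contract, not kernel code; the model_* helpers and the bare structs are illustrative stand-ins for the real zpdesc/zspage machinery.

/*
 * Userspace model (not kernel code) of the isolate/migrate contract
 * from the diff above: isolation is refused once the page's zspage
 * backpointer has been cleared by reset_zpdesc(), and migrating such
 * an already-destroyed page trivially "succeeds".
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MIGRATEPAGE_SUCCESS 0

struct zspage;			/* opaque in this sketch */

struct zpdesc {
	struct zspage *zspage;	/* NULL once reset_zpdesc() has run */
};

/* Models zs_page_isolate(): only pages still tied to a zspage isolate. */
static bool model_isolate(const struct zpdesc *zpdesc)
{
	return zpdesc->zspage != NULL;
}

/* Models the early return added to zs_page_migrate(). */
static int model_migrate(const struct zpdesc *zpdesc)
{
	if (!zpdesc->zspage)
		return MIGRATEPAGE_SUCCESS;	/* nothing left to move */
	/* ... the real code relocates objects and swaps the sub-page ... */
	return MIGRATEPAGE_SUCCESS;
}

int main(void)
{
	struct zspage *stub = (struct zspage *)0x1;	/* placeholder identity */
	struct zpdesc live = { .zspage = stub };
	struct zpdesc dead = { .zspage = NULL };

	printf("live page isolates: %d\n", model_isolate(&live));	/* 1 */
	printf("dead page isolates: %d\n", model_isolate(&dead));	/* 0 */
	printf("dead page migrate:  %d\n", model_migrate(&dead));	/* 0 */
	return 0;
}

The early MIGRATEPAGE_SUCCESS return is the stopgap that the TODO comment in the diff describes: the page lock is dropped between isolation and migration, so a zspage can be destroyed while one of its pages is isolated, and treating such a page as trivially migrated is safe because there are no objects left to move.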