Diffstat (limited to 'mm/z3fold.c')
-rw-r--r--  mm/z3fold.c  155
1 file changed, 55 insertions, 100 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c
index b3c0577b8095..f41f8b0d9e9a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -181,6 +181,7 @@ enum z3fold_page_flags {
NEEDS_COMPACTING,
PAGE_STALE,
PAGE_CLAIMED, /* by either reclaim or free */
+ PAGE_MIGRATED, /* page is migrated and soon to be released */
};
/*
@@ -212,10 +213,8 @@ static int size_to_chunks(size_t size)
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
gfp_t gfp)
{
- struct z3fold_buddy_slots *slots;
-
- slots = kmem_cache_zalloc(pool->c_handle,
- (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
+ struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
+ gfp);
if (slots) {
/* It will be freed separately in free_handle(). */
@@ -272,8 +271,13 @@ static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
locked = z3fold_page_trylock(zhdr);
read_unlock(&slots->lock);
- if (locked)
- break;
+ if (locked) {
+ struct page *page = virt_to_page(zhdr);
+
+ if (!test_bit(PAGE_MIGRATED, &page->private))
+ break;
+ z3fold_page_unlock(zhdr);
+ }
cpu_relax();
} while (true);
} else {
@@ -297,9 +301,6 @@ static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
int i;
bool is_free;
- if (handle & (1 << PAGE_HEADLESS))
- return;
-
if (WARN_ON(*(unsigned long *)handle == 0))
return;
@@ -345,7 +346,7 @@ static struct file_system_type z3fold_fs = {
};
static struct vfsmount *z3fold_mnt;
-static int z3fold_mount(void)
+static int __init z3fold_mount(void)
{
int ret = 0;
@@ -394,6 +395,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
clear_bit(NEEDS_COMPACTING, &page->private);
clear_bit(PAGE_STALE, &page->private);
clear_bit(PAGE_CLAIMED, &page->private);
+ clear_bit(PAGE_MIGRATED, &page->private);
if (headless)
return zhdr;
@@ -420,7 +422,6 @@ static void free_z3fold_page(struct page *page, bool headless)
__ClearPageMovable(page);
unlock_page(page);
}
- ClearPagePrivate(page);
__free_page(page);
}
@@ -521,13 +522,8 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
list_add(&zhdr->buddy, &pool->stale);
queue_work(pool->release_wq, &pool->work);
spin_unlock(&pool->stale_lock);
-}
-static void release_z3fold_page(struct kref *ref)
-{
- struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
- refcount);
- __release_z3fold_page(zhdr, false);
+ atomic64_dec(&pool->pages_nr);
}
static void release_z3fold_page_locked(struct kref *ref)
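With the pages_nr decrement folded into __release_z3fold_page(), every kref release path now accounts for the freed page exactly once, and the unlocked release_z3fold_page() helper, left without callers by the reclaim changes further down, goes away. A minimal before/after sketch of the resulting calling convention, reusing identifiers from this file (illustration only, not additional patch content):

	/* before: each caller had to decrement the counter on the final put */
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		atomic64_dec(&pool->pages_nr);
	else
		z3fold_page_unlock(zhdr);

	/* after: the release callback does the accounting itself */
	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked))
		z3fold_page_unlock(zhdr);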
@@ -738,13 +734,9 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
return new_zhdr;
out_fail:
- if (new_zhdr) {
- if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
- atomic64_dec(&pool->pages_nr);
- else {
- add_to_unbuddied(pool, new_zhdr);
- z3fold_page_unlock(new_zhdr);
- }
+ if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
+ add_to_unbuddied(pool, new_zhdr);
+ z3fold_page_unlock(new_zhdr);
}
return NULL;
@@ -817,10 +809,8 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
list_del_init(&zhdr->buddy);
spin_unlock(&pool->lock);
- if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
- atomic64_dec(&pool->pages_nr);
+ if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
return;
- }
if (test_bit(PAGE_STALE, &page->private) ||
test_and_set_bit(PAGE_CLAIMED, &page->private)) {
@@ -830,9 +820,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
if (!zhdr->foreign_handles && buddy_single(zhdr) &&
zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
- if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
- atomic64_dec(&pool->pages_nr);
- else {
+ if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
clear_bit(PAGE_CLAIMED, &page->private);
z3fold_page_unlock(zhdr);
}
@@ -877,7 +865,6 @@ lookup:
/* Re-check under lock. */
spin_lock(&pool->lock);
- l = &unbuddied[i];
if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
struct z3fold_header, buddy)) ||
!z3fold_page_trylock(zhdr)) {
@@ -951,10 +938,19 @@ lookup:
}
}
- if (zhdr && !zhdr->slots)
- zhdr->slots = alloc_slots(pool,
- can_sleep ? GFP_NOIO : GFP_ATOMIC);
+ if (zhdr && !zhdr->slots) {
+ zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
+ if (!zhdr->slots)
+ goto out_fail;
+ }
return zhdr;
+
+out_fail:
+ if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+ add_to_unbuddied(pool, zhdr);
+ z3fold_page_unlock(zhdr);
+ }
+ return NULL;
}
/*
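Two related fixes in the hunk above: __z3fold_alloc() holds the header spinlock at this point, so the slots allocation switches to GFP_ATOMIC unconditionally (the sleeping GFP_NOIO variant is no longer an option; that rationale is my reading, the patch does not spell it out), and an allocation failure now takes the new out_fail path instead of handing back a header with a NULL slots pointer. Condensed sketch, using the identifiers from this file:

	zhdr->slots = alloc_slots(pool, GFP_ATOMIC);	/* zhdr->page_lock is held */
	if (!zhdr->slots)
		goto out_fail;	/* drop our ref; if the page survives, re-add it
				 * to the unbuddied list and unlock it */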
@@ -1064,9 +1060,6 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
* performed first. If no suitable free region is found, then a new page is
* allocated and added to the pool to satisfy the request.
*
- * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
- * as z3fold pool pages.
- *
* Return: 0 if success and handle is set, otherwise -EINVAL if the size or
* gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
* a new page.
@@ -1080,7 +1073,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
enum buddy bud;
bool can_sleep = gfpflags_allow_blocking(gfp);
- if (!size)
+ if (!size || (gfp & __GFP_HIGHMEM))
return -EINVAL;
if (size > PAGE_SIZE)
@@ -1094,10 +1087,8 @@ retry:
if (zhdr) {
bud = get_free_buddy(zhdr, chunks);
if (bud == HEADLESS) {
- if (kref_put(&zhdr->refcount,
+ if (!kref_put(&zhdr->refcount,
release_z3fold_page_locked))
- atomic64_dec(&pool->pages_nr);
- else
z3fold_page_unlock(zhdr);
pr_err("No free chunks in unbuddied\n");
WARN_ON(1);
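The __GFP_HIGHMEM note removed from the kernel-doc above is now enforced in code: highmem pages cannot be used as z3fold pool pages, so z3fold_alloc() rejects such requests up front instead of silently masking the flag (compare the alloc_slots() hunk near the top, which stops masking gfp). Hedged example of the visible behaviour for a hypothetical direct caller (in-tree, these entry points are reached through the zpool backend):

	unsigned long handle;
	int err;

	err = z3fold_alloc(pool, size, GFP_KERNEL, &handle);	/* 0 on success */
	err = z3fold_alloc(pool, size, GFP_HIGHUSER, &handle);	/* now -EINVAL: __GFP_HIGHMEM is set */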
@@ -1109,28 +1100,7 @@ retry:
bud = FIRST;
}
- page = NULL;
- if (can_sleep) {
- spin_lock(&pool->stale_lock);
- zhdr = list_first_entry_or_null(&pool->stale,
- struct z3fold_header, buddy);
- /*
- * Before allocating a page, let's see if we can take one from
- * the stale pages list. cancel_work_sync() can sleep so we
- * limit this case to the contexts where we can sleep
- */
- if (zhdr) {
- list_del(&zhdr->buddy);
- spin_unlock(&pool->stale_lock);
- cancel_work_sync(&zhdr->work);
- page = virt_to_page(zhdr);
- } else {
- spin_unlock(&pool->stale_lock);
- }
- }
- if (!page)
- page = alloc_page(gfp);
-
+ page = alloc_page(gfp);
if (!page)
return -ENOMEM;
@@ -1150,10 +1120,9 @@ retry:
__SetPageMovable(page, pool->inode->i_mapping);
unlock_page(page);
} else {
- if (trylock_page(page)) {
- __SetPageMovable(page, pool->inode->i_mapping);
- unlock_page(page);
- }
+ WARN_ON(!trylock_page(page));
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
}
z3fold_page_lock(zhdr);
@@ -1190,9 +1159,9 @@ headless:
* @handle: handle associated with the allocation returned by z3fold_alloc()
*
* In the case that the z3fold page in which the allocation resides is under
- * reclaim, as indicated by the PG_reclaim flag being set, this function
- * only sets the first|last_chunks to 0. The page is actually freed
- * once both buddies are evicted (see z3fold_reclaim_page() below).
+ * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
+ * only sets the first|middle|last_chunks to 0. The page is actually freed
+ * once all buddies are evicted (see z3fold_reclaim_page() below).
*/
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
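For context, a hedged sketch of the lifecycle the comment above describes; the direct caller is hypothetical (in-tree these functions sit behind the zpool backend ops), and src/len stand for caller data:

	unsigned long handle;

	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
		void *dst = z3fold_map(pool, handle);	/* pointer into the z3fold page */
		memcpy(dst, src, len);
		z3fold_unmap(pool, handle);
		/*
		 * If the page is claimed by reclaim, the free below only zeroes
		 * the first|middle|last_chunks; the page itself goes away once
		 * all buddies are evicted.
		 */
		z3fold_free(pool, handle);
	}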
@@ -1244,24 +1213,19 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
if (!page_claimed)
free_handle(handle, zhdr);
- if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
- atomic64_dec(&pool->pages_nr);
+ if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
return;
- }
if (page_claimed) {
/* the page has not been claimed by us */
- z3fold_page_unlock(zhdr);
+ put_z3fold_header(zhdr);
return;
}
if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
- put_z3fold_header(zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
+ put_z3fold_header(zhdr);
return;
}
if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
- spin_lock(&pool->lock);
- list_del_init(&zhdr->buddy);
- spin_unlock(&pool->lock);
zhdr->cpu = -1;
kref_get(&zhdr->refcount);
clear_bit(PAGE_CLAIMED, &page->private);
@@ -1353,14 +1317,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
break;
}
- if (kref_get_unless_zero(&zhdr->refcount) == 0) {
- zhdr = NULL;
- break;
- }
if (!z3fold_page_trylock(zhdr)) {
- if (kref_put(&zhdr->refcount,
- release_z3fold_page))
- atomic64_dec(&pool->pages_nr);
zhdr = NULL;
continue; /* can't evict at this point */
}
@@ -1371,16 +1328,14 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
*/
if (zhdr->foreign_handles ||
test_and_set_bit(PAGE_CLAIMED, &page->private)) {
- if (kref_put(&zhdr->refcount,
- release_z3fold_page_locked))
- atomic64_dec(&pool->pages_nr);
- else
- z3fold_page_unlock(zhdr);
+ z3fold_page_unlock(zhdr);
zhdr = NULL;
continue; /* can't evict such page */
}
list_del_init(&zhdr->buddy);
zhdr->cpu = -1;
+ /* See comment in __z3fold_alloc. */
+ kref_get(&zhdr->refcount);
break;
}
@@ -1452,7 +1407,6 @@ next:
if (kref_put(&zhdr->refcount,
release_z3fold_page_locked)) {
kmem_cache_free(pool->c_handle, slots);
- atomic64_dec(&pool->pages_nr);
return 0;
}
/*
@@ -1463,8 +1417,10 @@ next:
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
spin_unlock(&pool->lock);
- z3fold_page_unlock(zhdr);
+ if (list_empty(&zhdr->buddy))
+ add_to_unbuddied(pool, zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
+ z3fold_page_unlock(zhdr);
}
/* We started off locked so we need to lock the pool back */
@@ -1616,8 +1572,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
if (!z3fold_page_trylock(zhdr))
return -EAGAIN;
if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
- z3fold_page_unlock(zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
+ z3fold_page_unlock(zhdr);
return -EBUSY;
}
if (work_pending(&zhdr->work)) {
@@ -1627,7 +1583,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
new_zhdr = page_address(newpage);
memcpy(new_zhdr, zhdr, PAGE_SIZE);
newpage->private = page->private;
- page->private = 0;
+ set_bit(PAGE_MIGRATED, &page->private);
z3fold_page_unlock(zhdr);
spin_lock_init(&new_zhdr->page_lock);
INIT_WORK(&new_zhdr->work, compact_page_work);
@@ -1638,7 +1594,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
INIT_LIST_HEAD(&new_zhdr->buddy);
new_mapping = page_mapping(page);
__ClearPageMovable(page);
- ClearPagePrivate(page);
get_page(newpage);
z3fold_page_lock(new_zhdr);
@@ -1658,8 +1613,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
- page_mapcount_reset(page);
- clear_bit(PAGE_CLAIMED, &page->private);
+ /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
+ page->private = 0;
put_page(page);
return 0;
}
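Taken together with the get_z3fold_header() and init_z3fold_page() hunks above, the new PAGE_MIGRATED bit keeps handle users off a page that is about to be released: it is set on the old page while its header is still locked, so any later successful trylock in get_z3fold_header() sees the bit, unlocks and retries. Rough summary of the ordering (my condensation of the hunks, not additional patch content):

	/* migration side: */
	z3fold_page_trylock(zhdr);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	set_bit(PAGE_MIGRATED, &page->private);	/* old page: hands off */
	z3fold_page_unlock(zhdr);
	/* ... new_zhdr is set up and queued for compaction ... */
	page->private = 0;			/* clears PAGE_CLAIMED and PAGE_MIGRATED */
	put_page(page);

	/*
	 * handle-user side (get_z3fold_header): a trylock that succeeds after
	 * the set_bit() above finds PAGE_MIGRATED, drops the lock and retries
	 * instead of using the stale header.
	 */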
@@ -1676,13 +1631,13 @@ static void z3fold_page_putback(struct page *page)
if (!list_empty(&zhdr->buddy))
list_del_init(&zhdr->buddy);
INIT_LIST_HEAD(&page->lru);
- if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
- atomic64_dec(&pool->pages_nr);
+ if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
return;
- }
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
spin_unlock(&pool->lock);
+ if (list_empty(&zhdr->buddy))
+ add_to_unbuddied(pool, zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
z3fold_page_unlock(zhdr);
}