author		Domenico Cerasuolo <cerasuolodomenico@gmail.com>	2023-06-14 16:31:22 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-06-19 16:19:33 -0700
commit		418fd29d9de53b143e28a07430e08dd414b74c3d (patch)
tree		6b0623257b4c5a129d8956b00da741000bddb0fb /mm/zswap.c
parent		6c77b607ee26472fb945aa41734281c39d06d68f (diff)
mm: zswap: invalidate entry after writeback
When an entry started writeback, it used to be invalidated by refcount logic alone, meaning that it would stay in the tree until all references were put. The problem with this behavior is that as soon as writeback starts, ownership of the data held by the entry passes to the swapcache, so the entry should not be left in zswap either. There are currently no known issues because of this, but this change explicitly invalidates an entry that has started writeback, to reduce opportunities for future bugs.

This patch is a follow-up to the series titled "mm: zswap: move writeback LRU from zpool to zswap" and to commit f090b7949768 ("mm: zswap: support exclusive loads").

Link: https://lkml.kernel.org/r/20230614143122.74471-1-cerasuolodomenico@gmail.com
Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
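To make the refcount handoff concrete: an entry is created holding one base reference, reclaim takes an extra local reference while the tree lock is dropped for IO, and the relocated zswap_invalidate_entry() helper removes the tree slot and drops the base reference once writeback has handed the data to the swapcache. Below is a minimal standalone sketch of that lifecycle. It is NOT the kernel source: entry, tree, entry_put, invalidate_entry and reclaim_entry are illustrative stand-ins, the rbtree is modeled as a single slot, and locking, compression and IO are elided.

/*
 * Standalone sketch (simplified, NOT the kernel source) of the
 * reference lifecycle the patch relies on.  The rbtree is modeled
 * as a single slot; zswap's locking, compression and IO are elided.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int refcount;		/* 1 base ref from creation + transient refs */
};

struct tree {
	struct entry *slot;	/* stands in for tree->rbroot */
};

static void entry_put(struct entry *entry)
{
	if (--entry->refcount == 0) {
		free(entry);	/* last reference gone: entry is freed */
		printf("entry freed\n");
	}
}

/* mirrors the role of the zswap_invalidate_entry() helper */
static void invalidate_entry(struct tree *tree, struct entry *entry)
{
	tree->slot = NULL;	/* remove from the "tree" */
	entry_put(entry);	/* drop the base reference from creation */
}

static int reclaim_entry(struct tree *tree)
{
	struct entry *entry = tree->slot;

	if (!entry)
		return -1;
	entry->refcount++;	/* local ref protects the entry during "IO" */

	/* ... writeback happens here, with the tree lock dropped ... */

	/*
	 * Writeback succeeded: the swapcache now owns the data, so take
	 * the entry out of the tree right away, unless a concurrent
	 * invalidate already did.
	 */
	if (entry == tree->slot)
		invalidate_entry(tree, entry);

	entry_put(entry);	/* drop the local reference */
	return 0;
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));
	struct tree t = { .slot = e };

	e->refcount = 1;	/* base reference from creation */
	reclaim_entry(&t);	/* writes back, invalidates, frees */
	return 0;
}

The point the patch makes is one of ordering: the tree slot and the base reference go away as soon as writeback succeeds, rather than lingering until the last transient reference is dropped.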
Diffstat (limited to 'mm/zswap.c')
-rw-r--r--	mm/zswap.c	33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 02d0a6f46f4a..c122f042a49d 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -599,6 +599,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 	return NULL;
 }
 
+static void zswap_invalidate_entry(struct zswap_tree *tree,
+				   struct zswap_entry *entry)
+{
+	/* remove from rbtree */
+	zswap_rb_erase(&tree->rbroot, entry);
+
+	/* drop the initial reference from entry creation */
+	zswap_entry_put(tree, entry);
+}
+
 static int zswap_reclaim_entry(struct zswap_pool *pool)
 {
 	struct zswap_entry *entry;
@@ -644,12 +654,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
 		goto put_unlock;
 	}
 
-	/* Check for invalidate() race */
-	if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
-		goto put_unlock;
-
-	/* Drop base reference */
-	zswap_entry_put(tree, entry);
+	/*
+	 * Writeback started successfully, the page now belongs to the
+	 * swapcache. Drop the entry from zswap - unless invalidate already
+	 * took it out while we had the tree->lock released for IO.
+	 */
+	if (entry == zswap_rb_search(&tree->rbroot, swpoffset))
+		zswap_invalidate_entry(tree, entry);
 
 put_unlock:
 	/* Drop local reference */
@@ -1376,16 +1387,6 @@ shrink:
 	goto reject;
 }
 
-static void zswap_invalidate_entry(struct zswap_tree *tree,
-				   struct zswap_entry *entry)
-{
-	/* remove from rbtree */
-	zswap_rb_erase(&tree->rbroot, entry);
-
-	/* drop the initial reference from entry creation */
-	zswap_entry_put(tree, entry);
-}
-
 /*
  * returns 0 if the page was successfully decompressed
  * return -1 on entry not found or error
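For context, the exclusive-loads commit referenced in the message pairs with the same helper on the load path: once the data has been decompressed into the page and is owned by the swapcache, the zswap copy can be dropped. A hedged sketch of that pattern, reusing the simplified stand-ins from the sketch above (load_entry is an illustrative name, not the kernel entry point):

/*
 * Sketch (again NOT the kernel source) of the exclusive-load pattern:
 * after a successful load, the zswap copy is invalidated because the
 * swapcache now owns the data.
 */
static int load_entry(struct tree *tree)
{
	struct entry *entry = tree->slot;

	if (!entry)
		return -1;		/* entry not found */
	entry->refcount++;		/* local ref for the copy-out */

	/* ... decompress the entry's data into the page ... */

	invalidate_entry(tree, entry);	/* exclusive load: drop zswap copy */
	entry_put(entry);		/* drop the local reference */
	return 0;
}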