author		Yosry Ahmed <yosryahmed@google.com>	2023-06-07 19:51:43 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-06-19 16:19:05 -0700
commit		b9c91c43412f2e07a5287dfe7027acdd8fb0b1ef (patch)
tree		1dfa30c07b46c1c9e048c89eaf66ba58c18daa1a /mm
parent		32b6a4a1745a46918f748f6fb7641e588fbec6f2 (diff)
mm: zswap: support exclusive loads
Commit 71024cb4a0bf ("frontswap: remove frontswap_tmem_exclusive_gets") removed support for exclusive loads from frontswap as it was not used. Bring back exclusive loads support to frontswap by adding an "exclusive" output parameter to frontswap_ops->load.

On the zswap side, add a module parameter to enable/disable exclusive loads, and a config option to control the boot default value. Refactor zswap entry invalidation in zswap_frontswap_invalidate_page() into zswap_invalidate_entry() to reuse it in zswap_frontswap_load() if exclusive loads are enabled.

With exclusive loads, we avoid having two copies of the same page in memory (compressed & uncompressed) after faulting it in from zswap. On the other hand, if the page is to be reclaimed again without being dirtied, it will be re-compressed. Compression is not usually slow, and a page that was just faulted in is less likely to be reclaimed again soon.

Link: https://lkml.kernel.org/r/20230607195143.1473802-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Suggested-by: Yu Zhao <yuzhao@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
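Note: the frontswap_ops change itself lives in include/linux/frontswap.h and is outside this mm-limited diffstat. A minimal sketch of the updated hook, inferred from the zswap_frontswap_load() hunk below rather than copied from the header:

	/*
	 * Sketch of the load hook after this patch (assumed shape; the header
	 * hunk is not shown here).  A backend sets *exclusive to true when the
	 * caller should drop the backend's copy: __frontswap_load() then marks
	 * the page dirty and clears the frontswap bit for that offset.
	 */
	int (*load)(unsigned type, pgoff_t offset, struct page *page,
		    bool *exclusive);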
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	16
-rw-r--r--	mm/frontswap.c	10
-rw-r--r--	mm/zswap.c	28
3 files changed, 44 insertions(+), 10 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 7672a22647b4..12f32f8d26bf 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -46,6 +46,22 @@ config ZSWAP_DEFAULT_ON
The selection made here can be overridden by using the kernel
command line 'zswap.enabled=' option.
+config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
+ bool "Invalidate zswap entries when pages are loaded"
+ depends on ZSWAP
+ help
+ If selected, exclusive loads for zswap will be enabled at boot,
+ otherwise it will be disabled.
+
+ If exclusive loads are enabled, when a page is loaded from zswap,
+ the zswap entry is invalidated at once, as opposed to leaving it
+ in zswap until the swap entry is freed.
+
+ This avoids having two copies of the same page in memory
+ (compressed and uncompressed) after faulting in a page from zswap.
+ The cost is that if the page was never dirtied and needs to be
+ swapped out again, it will be re-compressed.
+
choice
prompt "Default compressor"
depends on ZSWAP
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 279e55b4ed87..2fb5df3384b8 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -206,6 +206,7 @@ int __frontswap_load(struct page *page)
int type = swp_type(entry);
struct swap_info_struct *sis = swap_info[type];
pgoff_t offset = swp_offset(entry);
+ bool exclusive = false;
VM_BUG_ON(!frontswap_ops);
VM_BUG_ON(!PageLocked(page));
@@ -215,9 +216,14 @@ int __frontswap_load(struct page *page)
return -1;
/* Try loading from each implementation, until one succeeds. */
- ret = frontswap_ops->load(type, offset, page);
- if (ret == 0)
+ ret = frontswap_ops->load(type, offset, page, &exclusive);
+ if (ret == 0) {
inc_frontswap_loads();
+ if (exclusive) {
+ SetPageDirty(page);
+ __frontswap_clear(sis, offset);
+ }
+ }
return ret;
}
diff --git a/mm/zswap.c b/mm/zswap.c
index bcb82e09eb64..9fa86265f6dd 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
bool, 0644);
+static bool zswap_exclusive_loads_enabled = IS_ENABLED(
+ CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
+module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
+
/*********************************
* data structures
**********************************/
@@ -1340,12 +1344,22 @@ shrink:
goto reject;
}
+static void zswap_invalidate_entry(struct zswap_tree *tree,
+ struct zswap_entry *entry)
+{
+ /* remove from rbtree */
+ zswap_rb_erase(&tree->rbroot, entry);
+
+ /* drop the initial reference from entry creation */
+ zswap_entry_put(tree, entry);
+}
+
/*
* returns 0 if the page was successfully decompressed
* return -1 on entry not found or error
*/
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
- struct page *page)
+ struct page *page, bool *exclusive)
{
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
@@ -1415,6 +1429,10 @@ stats:
freeentry:
spin_lock(&tree->lock);
zswap_entry_put(tree, entry);
+ if (!ret && zswap_exclusive_loads_enabled) {
+ zswap_invalidate_entry(tree, entry);
+ *exclusive = true;
+ }
spin_unlock(&tree->lock);
return ret;
@@ -1434,13 +1452,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
spin_unlock(&tree->lock);
return;
}
-
- /* remove from rbtree */
- zswap_rb_erase(&tree->rbroot, entry);
-
- /* drop the initial reference from entry creation */
- zswap_entry_put(tree, entry);
-
+ zswap_invalidate_entry(tree, entry);
spin_unlock(&tree->lock);
}
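Usage note (standard module-parameter behaviour, not spelled out in the diff above): since the parameter is registered with mode 0644, exclusive loads can be toggled at runtime via /sys/module/zswap/parameters/exclusive_loads, or set at boot with the 'zswap.exclusive_loads=' kernel command line option, analogous to the 'zswap.enabled=' option mentioned in the Kconfig help; CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON only chooses the boot-time default. The net effect of an exclusive load, condensed from the __frontswap_load() hunk above (stats and error handling omitted):

	if (frontswap_ops->load(type, offset, page, &exclusive) == 0 && exclusive) {
		/*
		 * The compressed copy is gone, so the page can no longer be
		 * dropped as clean on reclaim; mark it dirty to force
		 * writeback to swap instead.
		 */
		SetPageDirty(page);
		/* the offset is no longer backed by the frontswap backend */
		__frontswap_clear(sis, offset);
	}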