path: root/mm/swap_state.c
author     Kairui Song <kasong@tencent.com>  2023-10-17 09:17:28 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2023-10-25 16:47:11 -0700
commit     e5b306a082982cb98bc7ec48a382225522401a61 (patch)
tree       e00b6ae62076aebb9089890881ef918f4e799a46 /mm/swap_state.c
parent     e56808fef8f71a192b2740c0b6ea8be7ab865d54 (diff)
mm/swap: avoid a xa load for swapout path
A variable is never used on the swapout path (shadowp is NULL), and the compiler is unable to optimize out the unneeded load since it is a function call. The load was introduced by commit 3852f6768ede ("mm/swapcache: support to handle the shadow entries").

Link: https://lkml.kernel.org/r/20231017011728.37508-1-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
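For illustration, the pattern the patch removes can be sketched outside the kernel as follows. This is not the kernel code: lookup(), record_old() and the plain pointer tests are hypothetical stand-ins for xas_load(), the per-page loop body in add_to_swap_cache() and the xa_is_value() check shown in the diff below. Because the load is an opaque function call, the compiler must assume it may have side effects and cannot drop it even when its result goes unused.

/*
 * Minimal sketch, not the kernel code: lookup() stands in for xas_load(),
 * "out" for the shadowp argument, which is NULL on the swapout path.
 */
void *lookup(void);	/* opaque external call; compiler cannot prove it side-effect free */

/* Before: lookup() runs even when out == NULL and the result is discarded. */
static void record_old(void **out)
{
	void *old = lookup();

	if (old && out)
		*out = old;
}

/* After: hoisting the NULL check skips the call entirely when out == NULL. */
static void record_old_fixed(void **out)
{
	if (out) {
		void *old = lookup();

		if (old)
			*out = old;
	}
}

The same reordering is applied to add_to_swap_cache() in the hunk below: the xas_load() only happens when the caller actually asked for the shadow entry.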
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--  mm/swap_state.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b3b14bd0dd64..ab79ffb71736 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -109,9 +109,9 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 			goto unlock;
 		for (i = 0; i < nr; i++) {
 			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
-			old = xas_load(&xas);
-			if (xa_is_value(old)) {
-				if (shadowp)
+			if (shadowp) {
+				old = xas_load(&xas);
+				if (xa_is_value(old))
 					*shadowp = old;
 			}
 			xas_store(&xas, folio);