summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorKemeng Shi <shikemeng@huaweicloud.com>2025-03-26 00:25:23 +0800
committerAndrew Morton <akpm@linux-foundation.org>2025-05-11 17:48:13 -0700
commit835b868878d0127bd29b8c0009dc424a63dadffb (patch)
tree311cce2430a06414a942c92d8b6794f57309b08e /mm
parent64944ef6a13e774546eb7635bdd26ef963335a41 (diff)
mm: swap: use swap_entries_free() to free swap entry in swap_entry_put_locked()
In swap_entry_put_locked(), we will set slot to SWAP_HAS_CACHE before using swap_entries_free() to do actual swap entry freeing. This introduces an unnecessary intermediate state. By using swap_entries_free() in swap_entry_put_locked(), we can eliminate the need to set slot to SWAP_HAS_CACHE. This change would make the behavior of swap_entry_put_locked() more consistent with other put() operations which will do actual free work after putting the last reference. Link: https://lkml.kernel.org/r/20250325162528.68385-4-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com> Reviewed-by: Kairui Song <kasong@tencent.com> Reviewed-by: Baoquan He <bhe@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/swapfile.c20
1 file changed, 9 insertions, 11 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c6b4c74622fc..f0ba27db9b3e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1356,9 +1356,11 @@ out:
}
static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
- unsigned long offset,
+ struct swap_cluster_info *ci,
+ swp_entry_t entry,
unsigned char usage)
{
+ unsigned long offset = swp_offset(entry);
unsigned char count;
unsigned char has_cache;
@@ -1390,7 +1392,7 @@ static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
if (usage)
WRITE_ONCE(si->swap_map[offset], usage);
else
- WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
+ swap_entries_free(si, ci, entry, 1);
return usage;
}
@@ -1469,9 +1471,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
unsigned char usage;
ci = lock_cluster(si, offset);
- usage = swap_entry_put_locked(si, offset, 1);
- if (!usage)
- swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+ usage = swap_entry_put_locked(si, ci, entry, 1);
unlock_cluster(ci);
return usage;
@@ -1570,8 +1570,8 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
ci = lock_cluster(si, offset);
do {
- if (!swap_entry_put_locked(si, offset, usage))
- swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+ swap_entry_put_locked(si, ci, swp_entry(si->type, offset),
+ usage);
} while (++offset < end);
unlock_cluster(ci);
}
@@ -1616,10 +1616,8 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
if (swap_only_has_cache(si, offset, size))
swap_entries_free(si, ci, entry, size);
else {
- for (int i = 0; i < size; i++, entry.val++) {
- if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
- swap_entries_free(si, ci, entry, 1);
- }
+ for (int i = 0; i < size; i++, entry.val++)
+ swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
}
unlock_cluster(ci);
}