path: root/mm/vmscan.c
author	Johannes Weiner <hannes@cmpxchg.org>	2023-05-19 14:39:58 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-06-09 16:25:36 -0700
commit	f98a497e1f16ee411df72629e32e31cba4cfa9cf (patch)
tree	7ac687f9f024801df463a95d096fe1a064cfe619 /mm/vmscan.c
parent	e8606320e9af9774fd879e71c940fc9e5fd9b901 (diff)
mm: compaction: remove unnecessary is_via_compact_memory() checks
Remove the is_via_compact_memory() checks from all paths that are not reachable via /proc/sys/vm/compact_memory.

Link: https://lkml.kernel.org/r/20230519123959.77335-5-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
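For context, the comments on the removed lines refer to the is_via_compact_memory() helper in mm/compaction.c: a manual whole-system compaction request written to /proc/sys/vm/compact_memory is passed down with the sentinel order value -1, which the helper tests for. Roughly (shown here for reference, comment paraphrased; not part of this diff):

	/*
	 * A compaction request triggered via /proc/sys/vm/compact_memory
	 * is encoded as order == -1.
	 */
	static inline bool is_via_compact_memory(int order)
	{
		return order == -1;
	}

The removed branches open-coded the same order == -1 test in reclaim paths that can never see that sentinel, which is why they are unnecessary.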
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	8
1 file changed, 1 insertion, 7 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9f8bfd1fcf58..99e4ae44850d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6399,9 +6399,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 		if (!managed_zone(zone))
 			continue;
 
-		if (sc->order == -1) /* is_via_compact_memory() */
-			return false;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 				      sc->reclaim_idx, 0))
@@ -6598,9 +6595,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
 
-	if (sc->order == -1) /* is_via_compact_memory() */
-		goto suitable;
-
 	/* Allocation can already succeed, nothing to do */
 	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 			      sc->reclaim_idx, 0))
@@ -6610,7 +6604,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	if (compaction_suitable(zone, sc->order,
 				sc->reclaim_idx) == COMPACT_SKIPPED)
 		return false;
-suitable:
+
 	/*
 	 * Compaction is already possible, but it takes time to run and there
 	 * are potentially other callers using the pages just freed. So proceed