author		Dmitry Osipenko <dmitry.osipenko@collabora.com>	2022-11-04 19:04:59 +0300
committer	Dmitry Osipenko <dmitry.osipenko@collabora.com>	2023-02-27 07:06:56 +0300
commit		9630b585b607bd26f505d34620b14d75b9a5af7d
tree		46eb1c8f9016be79477886fc966a5b6ce4480544
parent		ee9adb7a45516cfa536ca92253d7ae59d56db9e4
drm/msm/gem: Prevent blocking within shrinker loop
Consider this scenario:

 1. APP1 continuously creates lots of small GEMs
 2. APP2 triggers `drop_caches`
 3. Shrinker starts to evict APP1 GEMs, while APP1 produces new
    purgeable GEMs
 4. msm_gem_shrinker_scan() returns a non-zero number of freed pages,
    causing the shrinker core to try to shrink more
 5. msm_gem_shrinker_scan() again returns a non-zero number of freed
    pages, goto 4
 6. APP2 is blocked in `drop_caches` until APP1 stops producing
    purgeable GEMs

To prevent this blocking scenario, check the number of remaining pages
that the GPU shrinker could not release due to GEM locking contention
or shrinking rejection. If no pages remain to shrink, there is no need
to free up more pages and the shrinker may break out of the loop.

This problem was found during shrinker/madvise IOCTL testing of the
virtio-gpu driver. The MSM driver is affected in the same way.

Reviewed-by: Rob Clark <robdclark@gmail.com>
Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
Fixes: b352ba54a820 ("drm/msm/gem: Convert to using drm_gem_lru")
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://lore.kernel.org/all/20230108210445.3948344-2-dmitry.osipenko@collabora.com/
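As an aside, the loop-exit contract is easy to model outside the
kernel. The following is a minimal userspace sketch of the scenario
above, not kernel code; the refill model, the scan() helper, and its
check_remaining switch are hypothetical stand-ins for the shrinker
core, msm_gem_shrinker_scan(), and APP1:

#include <stdbool.h>
#include <stdio.h>

#define SHRINK_STOP (~0UL)	/* same sentinel value as include/linux/shrinker.h */

static unsigned long lru_pages = 256;	/* purgeable pages on the LRU */
static bool producer_running = true;	/* APP1 keeps creating GEMs */

/* Hypothetical stand-in for msm_gem_shrinker_scan(): check_remaining
 * selects between the old return logic and the fixed one. */
static unsigned long scan(unsigned long nr_to_scan, bool check_remaining)
{
	unsigned long freed = nr_to_scan < lru_pages ? nr_to_scan : lru_pages;
	/* Every scanned GEM purges cleanly in this model, so nothing is
	 * held back by lock contention or shrinking rejection: */
	unsigned long remaining = 0;

	lru_pages -= freed;
	if (producer_running)
		lru_pages += freed;	/* APP1 refills what was purged */

	if (check_remaining)	/* fixed logic from this patch */
		return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
	return (freed > 0) ? freed : SHRINK_STOP;	/* old logic */
}

int main(void)
{
	/* Models APP2 stuck in drop_caches: the old contract never yields
	 * SHRINK_STOP while APP1 refills the LRU (bounded here for demo). */
	for (int pass = 1; pass <= 4; pass++) {
		unsigned long ret = scan(128, true);

		printf("pass %d -> %s\n", pass,
		       ret == SHRINK_STOP ? "SHRINK_STOP" : "scan again");
		if (ret == SHRINK_STOP)
			break;	/* with the fix this triggers on pass 1 */
	}
	return 0;
}

With check_remaining flipped to false, the loop prints "scan again" on
every pass, mirroring steps 4-5 above; with the fix it stops on the
first pass because remaining is 0 even though freed is not.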
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_shrinker.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem_shrinker.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 051bdbc093cf..f38296ad8743 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		bool (*shrink)(struct drm_gem_object *obj);
 		bool cond;
 		unsigned long freed;
+		unsigned long remaining;
 	} stages[] = {
 		/* Stages of progressively more aggressive/expensive reclaim: */
 		{ &priv->lru.dontneed, purge, true },
@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	};
 	long nr = sc->nr_to_scan;
 	unsigned long freed = 0;
+	unsigned long remaining = 0;
 
 	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
 		if (!stages[i].cond)
 			continue;
 		stages[i].freed =
-			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+			drm_gem_lru_scan(stages[i].lru, nr,
+					 &stages[i].remaining,
+					 stages[i].shrink);
 		nr -= stages[i].freed;
 		freed += stages[i].freed;
+		remaining += stages[i].remaining;
 	}
 
 	if (freed) {
@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 			stages[3].freed);
 	}
 
-	return (freed > 0) ? freed : SHRINK_STOP;
+	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		NULL,
 	};
 	unsigned idx, unmapped = 0;
+	unsigned long remaining = 0;
 
 	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
 		unmapped += drm_gem_lru_scan(lrus[idx],
 				vmap_shrink_limit - unmapped,
+				&remaining,
 				vmap_shrink);
 	}
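For reference, after this series drm_gem_lru_scan() takes the extra
out-parameter through which pages that could not be released (e.g. due
to dma_resv lock contention or the shrink callback rejecting the
object) are reported back. A paraphrase of the updated declaration
from memory; see include/drm/drm_gem.h in the tree for the
authoritative form:

unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
			       unsigned int nr_to_scan,
			       unsigned long *remaining,
			       bool (*shrink)(struct drm_gem_object *obj));

Note that the vmap path above never consults the accumulated count:
the notifier-driven purge is not subject to the shrinker core's
SHRINK_STOP contract, so the &remaining argument appears here only to
satisfy the updated signature.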