author    Daniel Vetter <daniel.vetter@ffwll.ch>  2017-07-10 21:56:39 +0200
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2017-07-10 21:56:39 +0200
commit    953152253e9cbd4f358d4b4ca56d48072af3846d (patch)
tree      6c60e924732351682959f911833e3434d78aa849 /drivers/gpu/drm/msm/msm_gem_shrinker.c
parent    6d6a89708188823d676c9b84e7e2534b822465e3 (diff)
parent    00fc2c26bc46a64545cdf95a1511461ea9acecb4 (diff)
Merge tag 'drm-for-v4.13' into drm-intel-next-queued
Resync with the main drm-next pull request for 4.13. What we really need is to fully resync with pending drm-misc, but that's not yet possible due to the still ongoing merge window.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_shrinker.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index ab1dd020eb04..b72d8e6cd51d 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -20,6 +20,18 @@
 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 {
+	/* NOTE: we are *closer* to being able to get rid of
+	 * mutex_trylock_recursive().. the msm_gem code itself does
+	 * not need struct_mutex, although code paths that can trigger
+	 * the shrinker are still called in code paths that hold the
+	 * struct_mutex.
+	 *
+	 * Also, msm_obj->madv is protected by struct_mutex.
+	 *
+	 * The next step is probably to split out a separate lock for
+	 * protecting inactive_list, so that the shrinker does not need
+	 * struct_mutex.
+	 */
 	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
 	case MUTEX_TRYLOCK_FAILED:
 		return false;
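
The hunk cuts off after the first case, so for context: mutex_trylock_recursive() reports three outcomes, and this helper's job is to tell the caller both whether it may proceed and whether it will own the lock afterwards. A minimal sketch of how the remaining cases are typically handled, based on the pattern the NOTE describes (a sketch, not a verbatim quote of the file):

	static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
	{
		switch (mutex_trylock_recursive(&dev->struct_mutex)) {
		case MUTEX_TRYLOCK_FAILED:
			/* another thread holds struct_mutex: skip this shrink pass */
			return false;
		case MUTEX_TRYLOCK_SUCCESS:
			/* we took the lock here, so the caller must drop it */
			*unlock = true;
			return true;
		case MUTEX_TRYLOCK_RECURSIVE:
			/* our own call chain already holds struct_mutex (the
			 * shrinker was invoked from a path that locked it),
			 * so the caller must NOT unlock */
			*unlock = false;
			return true;
		}
		BUG();
	}

This three-way split is exactly why the NOTE calls mutex_trylock_recursive() something to get rid of: once inactive_list has its own lock, shrinker entry no longer depends on whether the current call chain holds struct_mutex.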
@@ -77,7 +89,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		if (freed >= sc->nr_to_scan)
 			break;
 		if (is_purgeable(msm_obj)) {
-			msm_gem_purge(&msm_obj->base);
+			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}
 	}
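
msm_gem_shrinker_scan() returns the number of pages it freed; before calling it, the core shrinker machinery queries a matching count callback to learn how many pages are reclaimable at all. The new OBJ_LOCK_SHRINKER argument is, as far as this diff shows, a lock-context token letting msm_gem_purge() take the per-object lock with nesting appropriate to shrinker callers. A sketch of what the counting side of such a shrinker looks like (function and field names follow this file's conventions but are assumptions here, not quoted code):

	static unsigned long
	msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
	{
		struct msm_drm_private *priv =
			container_of(shrinker, struct msm_drm_private, shrinker);
		struct msm_gem_object *msm_obj;
		unsigned long count = 0;
		bool unlock;

		if (!msm_gem_shrinker_lock(priv->dev, &unlock))
			return 0;

		/* report how many pages scan() could plausibly reclaim */
		list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
			if (is_purgeable(msm_obj))
				count += msm_obj->base.size >> PAGE_SHIFT;
		}

		if (unlock)
			mutex_unlock(&priv->dev->struct_mutex);

		return count;
	}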
@@ -106,7 +118,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base);
+			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			/* since we don't know any better, let's bail after a few
 			 * and if necessary the shrinker will be invoked again.
 			 * Seems better than unmapping *everything*
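
For completeness, the two hooks seen in these hunks are typically wired up at driver init: the shrinker proper via register_shrinker(), and the vunmap path via the vmalloc purge notifier chain, which fires when vmalloc address space runs low. A sketch, assuming the msm_drm_private field names (priv->shrinker, priv->vmap_notifier) match msm code of this era:

	void msm_gem_shrinker_init(struct drm_device *dev)
	{
		struct msm_drm_private *priv = dev->dev_private;

		/* register count/scan callbacks with the core MM shrinker */
		priv->shrinker.count_objects = msm_gem_shrinker_count;
		priv->shrinker.scan_objects = msm_gem_shrinker_scan;
		priv->shrinker.seeks = DEFAULT_SEEKS;
		WARN_ON(register_shrinker(&priv->shrinker));

		/* the vmap purge notifier runs when vmalloc space is exhausted,
		 * giving the driver a chance to vunmap idle GEM objects */
		priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
		WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
	}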