path: root/drivers/gpu/drm/msm/msm_gem.c
author    Rob Clark <robdclark@chromium.org>  2020-11-16 09:48:51 -0800
committer Rob Clark <robdclark@chromium.org>  2020-11-21 09:50:24 -0800
commit    3edfa30f2340e6c361b34fc0c53a5f3d3bbf9704 (patch)
tree      d234a13a566eeee447a9573ff4c80e57f8884353 /drivers/gpu/drm/msm/msm_gem.c
parent    fcd371c23c3a0a89bf6f3f415b14f75658c55c1c (diff)
drm/msm/shrinker: Only iterate dontneed objs
In situations where the GPU is mostly idle, all or nearly all buffer objects will be in the inactive list. But if the system is under memory pressure (from something other than the GPU), we could still get a lot of shrinker calls, which results in traversing a list of thousands of objs and in the end finding nothing to shrink. Which isn't so efficient.

Instead, split the inactive_list into two lists: one for inactive objs which are shrinkable, and a second for those that are not. This way we can avoid traversing objs which we know are not shrinker candidates.

v2: Fix inverted logic think-o

Signed-off-by: Rob Clark <robdclark@chromium.org>
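The shrinker half of this change lands in msm_gem_shrinker.c, which is outside this diffstat. A minimal sketch of the idea, assuming the new inactive_dontneed list and the existing is_purgeable()/msm_gem_purge() helpers; per-object locking details are elided, so this is an illustration of the technique rather than the verbatim patch:

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        struct msm_gem_object *msm_obj;
        unsigned long freed = 0;

        mutex_lock(&priv->mm_lock);

        /* Only objs marked DONTNEED are purge candidates, so only that
         * list needs to be walked; the (typically much larger) willneed
         * list is never touched:
         */
        list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
                if (freed >= sc->nr_to_scan)
                        break;
                if (is_purgeable(msm_obj)) {
                        msm_gem_purge(&msm_obj->base);
                        freed += msm_obj->base.size >> PAGE_SHIFT;
                }
        }

        mutex_unlock(&priv->mm_lock);

        return freed;
}

Under the old scheme this loop would walk every inactive obj, shrinkable or not; after the split it visits only objs that can actually be reclaimed.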
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  34
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 562db92aa631..834bb4e0b7ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -18,6 +18,7 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void update_inactive(struct msm_gem_object *msm_obj);
 
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
@@ -677,6 +678,12 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 
 	madv = msm_obj->madv;
 
+	/* If the obj is inactive, we might need to move it
+	 * between inactive lists
+	 */
+	if (msm_obj->active_count == 0)
+		update_inactive(msm_obj);
+
 	msm_gem_unlock(obj);
 
 	return (madv != __MSM_MADV_PURGED);
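For context, userspace drives msm_gem_madvise() through the MSM_GEM_MADVISE ioctl. A hedged userspace sketch of that path, using the drm_msm_gem_madvise uapi struct and libdrm's drmCommandWriteRead(); fd and bo_handle are assumed to come from earlier DRM/GEM setup:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Mark a GEM buffer purgeable. Returns 1 if the backing pages are
 * still intact, 0 if they were already purged, -1 on ioctl error.
 */
static int mark_purgeable(int fd, uint32_t bo_handle)
{
        struct drm_msm_gem_madvise req = {
                .handle = bo_handle,
                .madv = MSM_MADV_DONTNEED,
        };

        if (drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE,
                                &req, sizeof(req)))
                return -1;

        return req.retained;
}

With this patch, an idle buffer marked DONTNEED here also moves onto the inactive_dontneed list; flipping the same handle back to MSM_MADV_WILLNEED moves it off the shrinker's list again, which is exactly the move update_inactive() below performs.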
@@ -780,19 +787,31 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 void msm_gem_active_put(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_drm_private *priv = obj->dev->dev_private;
 
 	might_sleep();
 	WARN_ON(!msm_gem_is_locked(obj));
 
 	if (--msm_obj->active_count == 0) {
-		mutex_lock(&priv->mm_lock);
-		list_del_init(&msm_obj->mm_list);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&priv->mm_lock);
+		update_inactive(msm_obj);
 	}
 }
 
+static void update_inactive(struct msm_gem_object *msm_obj)
+{
+	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+	mutex_lock(&priv->mm_lock);
+	WARN_ON(msm_obj->active_count != 0);
+
+	list_del_init(&msm_obj->mm_list);
+	if (msm_obj->madv == MSM_MADV_WILLNEED)
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	else
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
+
+	mutex_unlock(&priv->mm_lock);
+}
+
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
 	bool write = !!(op & MSM_PREP_WRITE);
@@ -1098,7 +1117,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	}
 
 	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	/* Initially obj is idle, obj->madv == WILLNEED: */
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 	mutex_unlock(&priv->mm_lock);
 
 	return obj;
@@ -1168,7 +1188,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	msm_gem_unlock(obj);
 
 	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 	mutex_unlock(&priv->mm_lock);
 
 	return obj;