Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 212
1 file changed, 158 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f091c1e164fa..b199942266a2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,7 +96,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
@@ -130,6 +130,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
sync_for_device(msm_obj);
+
+ GEM_WARN_ON(msm_obj->active_count);
+ update_inactive(msm_obj);
}
return msm_obj->pages;
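Every WARN_ON() in this file becomes GEM_WARN_ON() in this patch. The macro itself comes from a companion msm_gem.h change that is not part of this diff; the sketch below is only an assumed shape, hedged because just the call sites are visible here:

/* Assumed companion definition in msm_gem.h (not in this diff): a
 * GEM-local wrapper so object-state warnings can later be tuned in
 * one place, e.g. gated behind a debug option.
 */
#define GEM_WARN_ON(x) WARN_ON(x)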
@@ -162,6 +165,7 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
+ msm_obj->sgt = NULL;
}
if (use_pages(obj))
@@ -180,7 +184,7 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
msm_gem_lock(obj);
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
@@ -256,7 +260,7 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
goto out;
}
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return VM_FAULT_SIGBUS;
}
@@ -289,7 +293,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
int ret;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -318,7 +322,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
@@ -337,7 +341,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
@@ -356,19 +360,25 @@ static void del_vma(struct msm_gem_vma *vma)
kfree(vma);
}
-/* Called with msm_obj locked */
+/**
+ * If close is true, this also closes the VMA (releasing the allocated
+ * iova range) in addition to removing the iommu mapping. In the eviction
+ * case (!close), we keep the iova allocated, but only remove the iommu
+ * mapping.
+ */
static void
-put_iova_spaces(struct drm_gem_object *obj)
+put_iova_spaces(struct drm_gem_object *obj, bool close)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
msm_gem_purge_vma(vma->aspace, vma);
- msm_gem_close_vma(vma->aspace, vma);
+ if (close)
+ msm_gem_close_vma(vma->aspace, vma);
}
}
}
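The new close flag separates the two teardown strengths used later in this patch: purging releases the iova range for good, while evicting keeps the allocation so the object can fault back in at the same GPU address. The two call patterns, as they appear further down in this diff:

/* Purge: the object's contents are gone for good, so both the
 * iommu mapping and the allocated iova range are released.
 */
put_iova_spaces(obj, true);

/* Evict: pages may be swapped back in later, so only the iommu
 * mapping is torn down and each vma keeps its iova.
 */
put_iova_spaces(obj, false);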
@@ -380,7 +390,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
@@ -394,7 +404,7 @@ static int get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret = 0;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace);
@@ -421,7 +431,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
struct page **pages;
- int prot = IOMMU_READ;
+ int ret, prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
@@ -429,21 +439,26 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
vma = lookup_vma(obj, aspace);
- if (WARN_ON(!vma))
+ if (GEM_WARN_ON(!vma))
return -EINVAL;
pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_map_vma(aspace, vma, prot,
+ ret = msm_gem_map_vma(aspace, vma, prot,
msm_obj->sgt, obj->size >> PAGE_SHIFT);
+
+ if (!ret)
+ msm_obj->pin_count++;
+
+ return ret;
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
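msm_gem_pin_iova() now counts successful pins, and msm_gem_unpin_iova_locked() (in a later hunk) decrements the counter and re-triages the object. A schematic sketch of the intended pairing, using only functions from this diff; real callers go through get_and_pin_iova_range_locked(), which creates the vma first:

msm_gem_lock(obj);
ret = msm_gem_pin_iova(obj, aspace);    /* pin_count++ on success */
if (!ret) {
	/* ... object is mapped and must not be evicted ... */
	msm_gem_unpin_iova_locked(obj, aspace);  /* pin_count--, then
						  * update_inactive() may
						  * move the object to an
						  * evictable list */
}
msm_gem_unlock(obj);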
@@ -453,7 +468,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
u64 local;
int ret;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
ret = get_iova_locked(obj, aspace, &local,
range_start, range_end);
@@ -524,7 +539,7 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
msm_gem_unlock(obj);
- WARN_ON(!vma);
+ GEM_WARN_ON(!vma);
return vma ? vma->iova : 0;
}
@@ -535,14 +550,21 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace);
- if (!WARN_ON(!vma))
+ if (!GEM_WARN_ON(!vma)) {
msm_gem_unmap_vma(aspace, vma);
+
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+
+ update_inactive(msm_obj);
+ }
}
/*
@@ -593,12 +615,12 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
if (obj->import_attach)
return ERR_PTR(-ENODEV);
- if (WARN_ON(msm_obj->madv > madv)) {
+ if (GEM_WARN_ON(msm_obj->madv > madv)) {
DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
@@ -664,8 +686,8 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_gem_is_locked(obj));
- WARN_ON(msm_obj->vmap_count < 1);
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
}
@@ -707,20 +729,23 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!is_purgeable(msm_obj));
- WARN_ON(obj->import_attach);
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!is_purgeable(msm_obj));
- put_iova_spaces(obj);
+ /* Get rid of any iommu mapping(s): */
+ put_iova_spaces(obj, true);
msm_gem_vunmap(obj);
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
put_pages(obj);
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
+ update_inactive(msm_obj);
- drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
drm_gem_free_mmap_offset(obj);
/* Our goal here is to return as much of the memory as
@@ -734,13 +759,36 @@ void msm_gem_purge(struct drm_gem_object *obj)
0, (loff_t)-1);
}
+/**
+ * Unpin the backing pages and make them available to be swapped out.
+ */
+void msm_gem_evict(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(is_unevictable(msm_obj));
+ GEM_WARN_ON(!msm_obj->evictable);
+ GEM_WARN_ON(msm_obj->active_count);
+
+ /* Get rid of any iommu mapping(s): */
+ put_iova_spaces(obj, false);
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+ put_pages(obj);
+
+ update_inactive(msm_obj);
+}
+
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
- if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+ if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
vunmap(msm_obj->vaddr);
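With msm_gem_evict() added, the two reclaim paths line up as follows (summarised from the two function bodies above):

                  msm_gem_purge()            msm_gem_evict()
 iommu mapping    removed                    removed
 iova range       closed (close=true)        kept (close=false)
 vma structs      freed (put_iova_vmas)      kept
 kernel vmap      vunmap'd, mmap off. freed  untouched
 userspace ptes   zapped                     zapped
 madv state       -> __MSM_MADV_PURGED       unchanged
 backing pages    dropped, contents lost     put; contents may swap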
@@ -788,12 +836,16 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
struct msm_drm_private *priv = obj->dev->dev_private;
might_sleep();
- WARN_ON(!msm_gem_is_locked(obj));
- WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
+ GEM_WARN_ON(msm_obj->dontneed);
+ GEM_WARN_ON(!msm_obj->sgt);
if (msm_obj->active_count++ == 0) {
mutex_lock(&priv->mm_lock);
- list_del_init(&msm_obj->mm_list);
+ if (msm_obj->evictable)
+ mark_unevictable(msm_obj);
+ list_del(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
mutex_unlock(&priv->mm_lock);
}
@@ -804,7 +856,7 @@ void msm_gem_active_put(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
might_sleep();
- WARN_ON(!msm_gem_is_locked(obj));
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
if (--msm_obj->active_count == 0) {
update_inactive(msm_obj);
@@ -815,14 +867,29 @@ static void update_inactive(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+ GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+
+ if (msm_obj->active_count != 0)
+ return;
+
mutex_lock(&priv->mm_lock);
- WARN_ON(msm_obj->active_count != 0);
- list_del_init(&msm_obj->mm_list);
- if (msm_obj->madv == MSM_MADV_WILLNEED)
+ if (msm_obj->dontneed)
+ mark_unpurgeable(msm_obj);
+ if (msm_obj->evictable)
+ mark_unevictable(msm_obj);
+
+ list_del(&msm_obj->mm_list);
+ if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
- else
+ mark_evictable(msm_obj);
+ } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
+ mark_purgeable(msm_obj);
+ } else {
+ GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
+ }
mutex_unlock(&priv->mm_lock);
}
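update_inactive() is now the single place an idle object gets filed onto one of three lists. The triage, restated as a comment (all names taken from the hunk above):

/*
 * inactive_willneed: madv==WILLNEED and pages are attached; the
 *                    shrinker may *evict* these (pages to swap,
 *                    iova kept).
 * inactive_dontneed: madv==DONTNEED; the shrinker may *purge*
 *                    these (pages and iova discarded).
 * inactive_unpinned: already purged, or pages never allocated;
 *                    nothing left for the shrinker to reclaim.
 */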
@@ -863,7 +930,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
fence->seqno);
}
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+ struct msm_gem_stats *stats)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
@@ -875,11 +943,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
msm_gem_lock(obj);
+ stats->all.count++;
+ stats->all.size += obj->size;
+
+ if (is_active(msm_obj)) {
+ stats->active.count++;
+ stats->active.size += obj->size;
+ }
+
+ if (msm_obj->pages) {
+ stats->resident.count++;
+ stats->resident.size += obj->size;
+ }
+
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
+ stats->purged.count++;
+ stats->purged.size += obj->size;
madv = " purged";
break;
case MSM_MADV_DONTNEED:
+ stats->purgeable.count++;
+ stats->purgeable.size += obj->size;
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
@@ -946,20 +1031,26 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
+ struct msm_gem_stats stats = {};
struct msm_gem_object *msm_obj;
- int count = 0;
- size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n");
- list_for_each_entry(msm_obj, list, mm_list) {
+ list_for_each_entry(msm_obj, list, node) {
struct drm_gem_object *obj = &msm_obj->base;
seq_puts(m, " ");
- msm_gem_describe(obj, m);
- count++;
- size += obj->size;
+ msm_gem_describe(obj, m, &stats);
}
- seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+ seq_printf(m, "Total: %4d objects, %9zu bytes\n",
+ stats.all.count, stats.all.size);
+ seq_printf(m, "Active: %4d objects, %9zu bytes\n",
+ stats.active.count, stats.active.size);
+ seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
+ stats.resident.count, stats.resident.size);
+ seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
+ stats.purgeable.count, stats.purgeable.size);
+ seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
+ stats.purged.count, stats.purged.size);
}
#endif
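After this change the debugfs dump ends with a five-line summary instead of a single total. Hypothetical output, with every count and size invented purely to show the %4d/%9zu column layout:

Total:       42 objects,  89128960 bytes
Active:       7 objects,  16777216 bytes
Resident:    30 objects,  60817408 bytes
Purgeable:    5 objects,   8388608 bytes
Purged:       2 objects,    262144 bytes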
@@ -970,19 +1061,25 @@ void msm_gem_free_object(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ mutex_lock(&priv->obj_lock);
+ list_del(&msm_obj->node);
+ mutex_unlock(&priv->obj_lock);
+
mutex_lock(&priv->mm_lock);
+ if (msm_obj->dontneed)
+ mark_unpurgeable(msm_obj);
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
msm_gem_lock(obj);
/* object should not be on active list: */
- WARN_ON(is_active(msm_obj));
+ GEM_WARN_ON(is_active(msm_obj));
- put_iova_spaces(obj);
+ put_iova_spaces(obj, true);
if (obj->import_attach) {
- WARN_ON(msm_obj->vaddr);
+ GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
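Freeing now drops the object from two lists under two different locks. The split, summarised with names from this diff:

/*
 * priv->objects (under priv->obj_lock): every GEM object, linked
 * via msm_obj->node and walked only by debugfs.
 * The mm_list chains (under priv->mm_lock): *where* the object
 * currently sits (gpu->active_list or one of the inactive_* lists),
 * walked by the retire path and the shrinker.
 *
 * Creation (later in this diff) does the mirror image:
 * list_add_tail(&msm_obj->node, &priv->objects) under obj_lock, and
 * list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned) under
 * mm_lock.
 */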
@@ -1098,7 +1195,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
- if (WARN_ON(use_vram && !priv->vram.size))
+ if (GEM_WARN_ON(use_vram && !priv->vram.size))
return ERR_PTR(-EINVAL);
/* Disallow zero sized objects as they make the underlying
@@ -1153,10 +1250,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
}
mutex_lock(&priv->mm_lock);
- /* Initially obj is idle, obj->madv == WILLNEED: */
- list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
mutex_unlock(&priv->mm_lock);
+ mutex_lock(&priv->obj_lock);
+ list_add_tail(&msm_obj->node, &priv->objects);
+ mutex_unlock(&priv->obj_lock);
+
return obj;
fail:
@@ -1224,9 +1324,13 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
mutex_unlock(&priv->mm_lock);
+ mutex_lock(&priv->obj_lock);
+ list_add_tail(&msm_obj->node, &priv->objects);
+ mutex_unlock(&priv->obj_lock);
+
return obj;
fail: