Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.c	1324
1 file changed, 757 insertions, 567 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6277fde13df9..017411a0bf45 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -4,34 +4,113 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-map-ops.h>
+#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+
+#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
-#include "msm_mmu.h"
+#include "msm_kms.h"
-static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
+{
+ uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
+ trace_gpu_mem_total(0, 0, total_mem);
+}
+static void update_ctx_mem(struct drm_file *file, ssize_t size)
+{
+ struct msm_context *ctx = file->driver_priv;
+ uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
-static dma_addr_t physaddr(struct drm_gem_object *obj)
+ rcu_read_lock(); /* Locks file->pid! */
+ trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
+ rcu_read_unlock();
+
+}
+
+static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
- priv->vram.paddr;
+ msm_gem_vma_get(obj);
+ update_ctx_mem(file, obj->size);
+ return 0;
}
-static bool use_pages(struct drm_gem_object *obj)
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason);
+
+static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- return !msm_obj->vram_node;
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
+
+ update_ctx_mem(file, -obj->size);
+ msm_gem_vma_put(obj);
+
+ /*
+ * If the VM isn't created yet, there is nothing to clean up. And in fact calling
+ * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
+ * down the mappings of shared buffers in other contexts.
+ */
+ if (!ctx->vm)
+ return;
+
+ /*
+ * VM_BIND does not depend on implicit teardown of VMAs on handle
+ * close, but instead on implicit teardown of the VM when the device
+ * is closed (see msm_gem_vm_close())
+ */
+ if (msm_context_is_vmbind(ctx))
+ return;
+
+ /*
+ * TODO we might need to kick this to a queue to avoid blocking
+ * in CLOSE ioctl
+ */
+ dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
+ put_iova_spaces(obj, ctx->vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
+}
+
+/*
+ * Get/put for kms->vm VMA
+ */
+
+void msm_gem_vma_get(struct drm_gem_object *obj)
+{
+ atomic_inc(&to_msm_bo(obj)->vma_ref);
+}
+
+void msm_gem_vma_put(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
+ return;
+
+ if (!priv->kms)
+ return;
+
+#ifdef CONFIG_DRM_MSM_KMS
+ struct drm_exec exec;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
+ put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
+ drm_exec_fini(&exec); /* drop locks */
+#endif
}
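/*
 * Illustrative sketch, not part of this patch: a holder of a kms->vm mapping
 * is expected to bracket its use with the get/put pair above so the VMA is
 * only torn down when the last user drops its reference.  The helper below is
 * hypothetical and assumes msm_gem_get_and_pin_iova() from later in this file.
 */
static int example_hold_kms_mapping(struct drm_gem_object *obj,
				    struct msm_kms *kms, uint64_t *iova)
{
	int ret;

	msm_gem_vma_get(obj);			/* keep the kms->vm VMA alive */

	ret = msm_gem_get_and_pin_iova(obj, kms->vm, iova);
	if (ret)
		msm_gem_vma_put(obj);		/* drop the reference on failure */

	return ret;
}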
/*
@@ -52,71 +131,71 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
- dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- dma_map_sg(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- }
+ dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
- dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
+}
+
+static void update_lru_active(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ GEM_WARN_ON(!msm_obj->pages);
+
+ if (msm_obj->pin_count) {
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
} else {
- dma_unmap_sg(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
}
}
-/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
+static void update_lru_locked(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
- dma_addr_t paddr;
- struct page **p;
- int ret, i;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
- p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
+ msm_gem_assert_locked(&msm_obj->base);
- spin_lock(&priv->vram.lock);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
- spin_unlock(&priv->vram.lock);
- if (ret) {
- kvfree(p);
- return ERR_PTR(ret);
- }
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
- paddr = physaddr(obj);
- for (i = 0; i < npages; i++) {
- p[i] = phys_to_page(paddr);
- paddr += PAGE_SIZE;
+ drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
+ } else {
+ update_lru_active(obj);
}
+}
- return p;
+static void update_lru(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ mutex_lock(&priv->lru.lock);
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
}
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ msm_gem_assert_locked(obj);
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
- if (use_pages(obj))
- p = drm_gem_get_pages(obj);
- else
- p = get_pages_vram(obj, npages);
+ p = drm_gem_get_pages(obj);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
@@ -124,9 +203,11 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
+ update_device_mem(dev->dev_private, obj->size);
+
msm_obj->pages = p;
- msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
@@ -138,116 +219,114 @@ static struct page **get_pages(struct drm_gem_object *obj)
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
+
+ update_lru(obj);
}
return msm_obj->pages;
}
-static void put_pages_vram(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
-
- spin_lock(&priv->vram.lock);
- drm_mm_remove_node(msm_obj->vram_node);
- spin_unlock(&priv->vram.lock);
-
- kvfree(msm_obj->pages);
-}
-
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /*
+ * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
+ * See explanation in msm_gem_assert_locked()
+ */
+ if (kref_read(&obj->refcount))
+ drm_gpuvm_bo_gem_evict(obj, true);
+
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
* pages are clean because display controller,
* GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
+ msm_obj->sgt = NULL;
}
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
- put_pages_vram(obj);
+ update_device_mem(obj->dev->dev_private, -obj->size);
+
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
msm_obj->pages = NULL;
+ update_lru(obj);
}
}
-struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **p;
- mutex_lock(&msm_obj->lock);
+ msm_gem_assert_locked(obj);
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
+ if (msm_obj->madv > madv) {
+ DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
- p = get_pages(obj);
- mutex_unlock(&msm_obj->lock);
- return p;
+ return get_pages(obj);
}
-void msm_gem_put_pages(struct drm_gem_object *obj)
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{
- /* when we start tracking the pin count, then do something here */
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ msm_gem_assert_locked(obj);
+
+ to_msm_bo(obj)->pin_count++;
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
}
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static void pin_obj_locked(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ mutex_lock(&priv->lru.lock);
+ msm_gem_pin_obj_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+}
- if (msm_obj->flags & MSM_BO_WC) {
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- } else if (msm_obj->flags & MSM_BO_UNCACHED) {
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- } else {
- /*
- * Shunt off cached objs to shmem file so they have their own
- * address_space (so unmap_mapping_range does what we want,
- * in particular in the case of mmap'd dmabufs)
- */
- fput(vma->vm_file);
- get_file(obj->filp);
- vma->vm_pgoff = 0;
- vma->vm_file = obj->filp;
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
+{
+ struct page **p;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- }
+ msm_gem_assert_locked(obj);
- return 0;
+ p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (!IS_ERR(p))
+ pin_obj_locked(obj);
+
+ return p;
}
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
- int ret;
+ msm_gem_assert_locked(obj);
- ret = drm_gem_mmap(filp, vma);
- if (ret) {
- DBG("mmap failed: %d", ret);
- return ret;
- }
+ msm_gem_unpin_locked(obj);
+}
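/*
 * Illustrative sketch, not part of this patch: typical pin/unpin pairing
 * around a short CPU-side access, assuming the caller takes the object lock
 * with msm_gem_lock()/msm_gem_unlock().
 */
static int example_access_pages(struct drm_gem_object *obj)
{
	struct page **pages;

	msm_gem_lock(obj);

	pages = msm_gem_pin_pages_locked(obj);	/* gets backing pages + pin count */
	if (IS_ERR(pages)) {
		msm_gem_unlock(obj);
		return PTR_ERR(pages);
	}

	/* ... access pages[] here ... */

	msm_gem_unpin_pages_locked(obj);	/* drops the pin, updates LRU */
	msm_gem_unlock(obj);

	return 0;
}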
- return msm_gem_mmap_obj(vma->vm_private_data, vma);
+static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+{
+ if (msm_obj->flags & MSM_BO_WC)
+ return pgprot_writecombine(prot);
+ return prot;
}
-vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -262,14 +341,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we don't need to hold one here.
*/
- err = mutex_lock_interruptible(&msm_obj->lock);
+ err = msm_gem_lock_interruptible(obj);
if (err) {
ret = VM_FAULT_NOPAGE;
goto out;
}
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ msm_gem_unlock(obj);
return VM_FAULT_SIGBUS;
}
@@ -288,9 +367,10 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
out_unlock:
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unlock(obj);
out:
return ret;
}
@@ -299,10 +379,9 @@ out:
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
+ msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -318,111 +397,93 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
+ msm_gem_lock(obj);
offset = mmap_offset(obj);
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unlock(obj);
return offset;
}
-static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
-
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
+ struct drm_gpuvm_bo *vm_bo;
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma)
- return ERR_PTR(-ENOMEM);
+ msm_gem_assert_locked(obj);
- vma->aspace = aspace;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
- list_add_tail(&vma->list, &msm_obj->vmas);
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm == vm) {
+ /* lookup_vma() should only be used in paths
+ * with at most one vma per vm
+ */
+ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
- return vma;
-}
-
-static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
-
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
-
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace == aspace)
- return vma;
+ return vma;
+ }
+ }
}
return NULL;
}
-static void del_vma(struct msm_gem_vma *vma)
+/*
+ * If close is true, this also closes the VMA (releasing the allocated
+ * iova range) in addition to removing the iommu mapping. In the eviction
+ * case (!close), we keep the iova allocated, but only remove the iommu
+ * mapping.
+ */
+static void
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason)
{
- if (!vma)
- return;
+ struct drm_gpuvm_bo *vm_bo, *tmp;
- list_del(&vma->list);
- kfree(vma);
-}
+ msm_gem_assert_locked(obj);
-/* Called with msm_obj->lock locked */
-static void
-put_iova(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
+ drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
+ struct drm_gpuva *vma, *vmatmp;
+
+ if (vm && vm_bo->vm != vm)
+ continue;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
+ drm_gpuvm_bo_get(vm_bo);
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- if (vma->aspace) {
- msm_gem_purge_vma(vma->aspace, vma);
- msm_gem_close_vma(vma->aspace, vma);
+ drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
+ msm_gem_vma_unmap(vma, reason);
+ if (close)
+ msm_gem_vma_close(vma);
}
- del_vma(vma);
+
+ drm_gpuvm_bo_put(vm_bo);
}
}
-static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, u64 range_start,
+ u64 range_end)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- int ret = 0;
+ struct drm_gpuva *vma;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
+ msm_gem_assert_locked(obj);
- vma = lookup_vma(obj, aspace);
+ vma = lookup_vma(obj, vm);
if (!vma) {
- vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
- range_start, range_end);
- if (ret) {
- del_vma(vma);
- return ret;
- }
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
+ } else {
+ GEM_WARN_ON(vma->va.addr < range_start);
+ GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
- *iova = vma->iova;
- return 0;
+ return vma;
}
-static int msm_gem_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+int msm_gem_prot(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- struct page **pages;
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
@@ -431,89 +492,189 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
+ if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+ prot |= IOMMU_CACHE;
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
- return -EBUSY;
+ return prot;
+}
- vma = lookup_vma(obj, aspace);
- if (WARN_ON(!vma))
- return -EINVAL;
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ int prot = msm_gem_prot(obj);
- pages = get_pages(obj);
+ msm_gem_assert_locked(obj);
+
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_map_vma(aspace, vma, prot,
- msm_obj->sgt, obj->size >> PAGE_SHIFT);
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
-/*
- * get iova and pin it. Should have a matching put
- * limits iova to specified range (in pages)
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+
+ mutex_lock(&priv->lru.lock);
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+}
+
+/* Special unpin path for use in fence-signaling path, avoiding the need
+ * to hold the obj lock by only depending on things that are protected by
+ * the LRU lock. In particular we know that we already have backing
+ * and that the object's dma_resv has the fence for the current
+ * submit/job which will prevent us racing against page eviction.
*/
-int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+void msm_gem_unpin_active(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- u64 local;
+
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_lru_active(obj);
+}
+
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
+{
+ return get_vma_locked(obj, vm, 0, U64_MAX);
+}
+
+static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
+{
+ struct drm_gpuva *vma;
int ret;
- mutex_lock(&msm_obj->lock);
+ msm_gem_assert_locked(obj);
- ret = msm_gem_get_iova_locked(obj, aspace, &local,
- range_start, range_end);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
- if (!ret)
- ret = msm_gem_pin_iova(obj, aspace);
+ vma = get_vma_locked(obj, vm, range_start, range_end);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- if (!ret)
- *iova = local;
+ ret = msm_gem_pin_vma_locked(obj, vma);
+ if (!ret) {
+ *iova = vma->va.addr;
+ pin_obj_locked(obj);
+ }
+
+ return ret;
+}
+
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
+{
+ struct drm_exec exec;
+ int ret;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
+ drm_exec_fini(&exec); /* drop locks */
- mutex_unlock(&msm_obj->lock);
return ret;
}
/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+ return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
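/*
 * Illustrative sketch, not part of this patch: the pin taken here is balanced
 * by msm_gem_unpin_iova() on the same vm once the mapping is no longer needed.
 */
static int example_use_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_and_pin_iova(obj, vm, &iova);
	if (ret)
		return ret;

	/* ... hand 'iova' to the GPU ... */

	msm_gem_unpin_iova(obj, vm);		/* matching put */

	return 0;
}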
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
+ int ret = 0;
- mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
- mutex_unlock(&msm_obj->lock);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = get_vma_locked(obj, vm, 0, U64_MAX);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else {
+ *iova = vma->va.addr;
+ }
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
-/* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
+static int clear_iova(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
+{
+ struct drm_gpuva *vma = lookup_vma(obj, vm);
+
+ if (!vma)
+ return 0;
+
+ msm_gem_vma_unmap(vma, NULL);
+ msm_gem_vma_close(vma);
+
+ return 0;
+}
+
+/*
+ * Get the requested iova but don't pin it. Fails if the requested iova is
+ * not available. Doesn't need a put because iovas are currently valid for
+ * the life of the object.
+ *
+ * Setting an iova of zero will clear the vma.
*/
-uint64_t msm_gem_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+int msm_gem_set_iova(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, uint64_t iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_exec exec;
+ int ret = 0;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ if (!iova) {
+ ret = clear_iova(obj, vm);
+ } else {
+ struct drm_gpuva *vma;
+ vma = get_vma_locked(obj, vm, iova, iova + obj->size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else if (GEM_WARN_ON(vma->va.addr != iova)) {
+ clear_iova(obj, vm);
+ ret = -EBUSY;
+ }
+ }
+ drm_exec_fini(&exec); /* drop locks */
- mutex_lock(&msm_obj->lock);
- vma = lookup_vma(obj, aspace);
- mutex_unlock(&msm_obj->lock);
- WARN_ON(!vma);
+ return ret;
+}
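/*
 * Illustrative sketch, not part of this patch: a caller that assigns its own
 * addresses reserves a fixed iova for the BO and later clears the vma again
 * by passing zero.  Note that msm_gem_set_iova() does not pin the mapping.
 */
static int example_fixed_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
			      uint64_t fixed_addr)
{
	int ret;

	/* Reserve [fixed_addr, fixed_addr + obj->size) for this BO: */
	ret = msm_gem_set_iova(obj, vm, fixed_addr);
	if (ret)
		return ret;

	/* ... */

	/* Release the reservation again: */
	return msm_gem_set_iova(obj, vm, 0);
}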
- return vma ? vma->iova : 0;
+static bool is_kms_vm(struct drm_gpuvm *vm)
+{
+#ifdef CONFIG_DRM_MSM_KMS
+ struct msm_drm_private *priv = vm->drm->dev_private;
+
+ return priv->kms && (priv->kms->vm == vm);
+#else
+ return false;
+#endif
}
/*
@@ -521,26 +682,50 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
- mutex_lock(&msm_obj->lock);
- vma = lookup_vma(obj, aspace);
-
- if (!WARN_ON(!vma))
- msm_gem_unmap_vma(aspace, vma);
-
- mutex_unlock(&msm_obj->lock);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = lookup_vma(obj, vm);
+ if (vma) {
+ msm_gem_unpin_locked(obj);
+ }
+ if (!is_kms_vm(vm))
+ put_iova_spaces(obj, vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- args->pitch = align_pitch(args->width, args->bpp);
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ u32 fourcc;
+ u64 pitch_align;
+ int ret;
+
+ /*
+ * Adreno needs pitch aligned to 32 pixels. Compute the number
+ * of bytes for a block of 32 pixels at the given color format.
+ * Use the result as pitch alignment.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info;
+
+ info = drm_format_info(fourcc);
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, 32);
+ } else {
+ pitch_align = round_up(args->width, 32) * DIV_ROUND_UP(args->bpp, SZ_8);
+ }
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
+ if (ret)
+ return ret;
+
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
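/*
 * Worked example, illustrative only: for a 32 bpp XRGB8888 dumb buffer,
 * 32 pixels take 32 * 4 = 128 bytes, so pitch_align is 128.  A width of
 * 1000 pixels needs at least 4000 bytes per line, which drm_mode_size_dumb()
 * is expected to round up to 4096 bytes (the next multiple of 128), with
 * args->size then page-aligned from pitch * height.
 */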
@@ -560,7 +745,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = msm_gem_mmap_offset(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
fail:
return ret;
@@ -569,53 +754,61 @@ fail:
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
int ret = 0;
- mutex_lock(&msm_obj->lock);
+ msm_gem_assert_locked(obj);
- if (WARN_ON(msm_obj->madv > madv)) {
- DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
- mutex_unlock(&msm_obj->lock);
- return ERR_PTR(-EBUSY);
- }
+ if (drm_gem_is_imported(obj))
+ return ERR_PTR(-ENODEV);
+
+ pages = msm_gem_get_pages_locked(obj, madv);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+
+ pin_obj_locked(obj);
/* increment vmap_count *before* vmap() call, so shrinker can
- * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+ * check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
* same object from within the vmap() call (while we already
- * hold msm_obj->lock)
+ * hold msm_obj lock)
*/
msm_obj->vmap_count++;
if (!msm_obj->vaddr) {
- struct page **pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
}
}
- mutex_unlock(&msm_obj->lock);
return msm_obj->vaddr;
fail:
msm_obj->vmap_count--;
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unpin_locked(obj);
return ERR_PTR(ret);
}
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
return get_vaddr(obj, MSM_MADV_WILLNEED);
}
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+{
+ void *ret;
+
+ msm_gem_lock(obj);
+ ret = msm_gem_get_vaddr_locked(obj);
+ msm_gem_unlock(obj);
+
+ return ret;
+}
+
/*
* Don't use this! It is for the very special case of dumping
* submits from GPU hangs or faults, where the bo may already
@@ -627,14 +820,22 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
return get_vaddr(obj, __MSM_MADV_PURGED);
}
-void msm_gem_put_vaddr(struct drm_gem_object *obj)
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
- WARN_ON(msm_obj->vmap_count < 1);
+ msm_gem_assert_locked(obj);
+ GEM_WARN_ON(msm_obj->vmap_count < 1);
+
msm_obj->vmap_count--;
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unpin_locked(obj);
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_put_vaddr_locked(obj);
+ msm_gem_unlock(obj);
}
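/*
 * Illustrative sketch, not part of this patch: kernel-internal CPU access
 * pairs msm_gem_get_vaddr() with msm_gem_put_vaddr(); while the vmap_count
 * (and the pin it implies) is held, the backing pages cannot be purged or
 * evicted.
 */
static int example_cpu_write(struct drm_gem_object *obj, u32 value)
{
	u32 *ptr = msm_gem_get_vaddr(obj);

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr[0] = value;			/* CPU access through the kernel mapping */

	msm_gem_put_vaddr(obj);

	return 0;
}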
/* Update madvise status, returns true if not purged, else
@@ -642,42 +843,53 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj)
*/
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
+ msm_gem_lock(obj);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ mutex_lock(&priv->lru.lock);
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
madv = msm_obj->madv;
- mutex_unlock(&msm_obj->lock);
+ /* If the obj is inactive, we might need to move it
+ * between inactive lists
+ */
+ update_lru_locked(obj);
+
+ mutex_unlock(&priv->lru.lock);
+
+ msm_gem_unlock(obj);
return (madv != __MSM_MADV_PURGED);
}
-void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
+void msm_gem_purge(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!is_purgeable(msm_obj));
- WARN_ON(obj->import_attach);
+ msm_gem_assert_locked(obj);
+ GEM_WARN_ON(!is_purgeable(msm_obj));
- mutex_lock_nested(&msm_obj->lock, subclass);
+ /* Get rid of any iommu mapping(s): */
+ put_iova_spaces(obj, NULL, false, "purge");
- put_iova(obj);
+ msm_gem_vunmap(obj);
- msm_gem_vunmap_locked(obj);
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
+ mutex_lock(&priv->lru.lock);
+ /* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
+ mutex_unlock(&priv->lru.lock);
- drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
drm_gem_free_mmap_offset(obj);
/* Our goal here is to return as much of the memory as
@@ -689,92 +901,48 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0, (loff_t)-1);
-
- mutex_unlock(&msm_obj->lock);
}
-static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
+/*
+ * Unpin the backing pages and make them available to be swapped out.
+ */
+void msm_gem_evict(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
+ msm_gem_assert_locked(obj);
+ GEM_WARN_ON(is_unevictable(msm_obj));
- if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
- return;
-
- vunmap(msm_obj->vaddr);
- msm_obj->vaddr = NULL;
-}
+ /* Get rid of any iommu mapping(s): */
+ put_iova_spaces(obj, NULL, false, "evict");
-void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
- mutex_lock_nested(&msm_obj->lock, subclass);
- msm_gem_vunmap_locked(obj);
- mutex_unlock(&msm_obj->lock);
+ put_pages(obj);
}
-/* must be called before _move_to_active().. */
-int msm_gem_sync_object(struct drm_gem_object *obj,
- struct msm_fence_context *fctx, bool exclusive)
+void msm_gem_vunmap(struct drm_gem_object *obj)
{
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
- int i, ret;
-
- fobj = dma_resv_get_list(obj->resv);
- if (!fobj || (fobj->shared_count == 0)) {
- fence = dma_resv_get_excl(obj->resv);
- /* don't need to wait on our own fences, since ring is fifo */
- if (fence && (fence->context != fctx->context)) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- if (!exclusive || !fobj)
- return 0;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
- for (i = 0; i < fobj->shared_count; i++) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(obj->resv));
- if (fence->context != fctx->context) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
+ msm_gem_assert_locked(obj);
- return 0;
-}
+ if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
+ return;
-void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
- msm_obj->gpu = gpu;
- if (exclusive)
- dma_resv_add_excl_fence(obj->resv, fence);
- else
- dma_resv_add_shared_fence(obj->resv, fence);
- list_del_init(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
}
-void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+bool msm_gem_active(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ msm_gem_assert_locked(obj);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ if (to_msm_bo(obj)->pin_count)
+ return true;
- msm_obj->gpu = NULL;
- list_del_init(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -784,8 +952,13 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout_rcu(obj->resv, write,
- true, remain);
+ if (op & MSM_PREP_BOOST) {
+ dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
+ ktime_get());
+ }
+
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
@@ -803,33 +976,39 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
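/*
 * Illustrative sketch, not part of this patch, assuming the timeout argument
 * is an absolute ktime as elsewhere in this driver:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 50);
 *	int ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_BOOST, &timeout);
 *	if (!ret)
 *		msm_gem_cpu_fini(obj);
 *
 * MSM_PREP_BOOST sets a dma-resv deadline of "now", nudging producers to
 * signal the awaited fences sooner.
 */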
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct dma_fence *fence, const char *type,
- struct seq_file *m)
-{
- if (!dma_fence_is_signaled(fence))
- seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->seqno);
-}
-
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+ struct msm_gem_stats *stats)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
- struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- mutex_lock(&msm_obj->lock);
+ if (!msm_gem_trylock(obj))
+ return;
+
+ stats->all.count++;
+ stats->all.size += obj->size;
+
+ if (msm_gem_active(obj)) {
+ stats->active.count++;
+ stats->active.size += obj->size;
+ }
+
+ if (msm_obj->pages) {
+ stats->resident.count++;
+ stats->resident.size += obj->size;
+ }
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
+ stats->purged.count++;
+ stats->purged.size += obj->size;
madv = " purged";
break;
case MSM_MADV_DONTNEED:
+ stats->purgeable.count++;
+ stats->purgeable.size += obj->size;
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
@@ -839,138 +1018,167 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
- msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
+ if (!list_empty(&obj->gpuva.list)) {
+ struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
- vma->aspace != NULL ? vma->aspace->name : NULL,
- vma->iova, vma->mapped ? "mapped" : "unmapped",
- vma->inuse);
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ const char *name, *comm;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct task_struct *task =
+ get_pid_task(vm->pid, PIDTYPE_PID);
+ if (task) {
+ comm = kstrdup(task->comm, GFP_KERNEL);
+ put_task_struct(task);
+ } else {
+ comm = NULL;
+ }
+ name = vm->base.name;
+
+ seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->vm, vma->va.addr,
+ to_msm_vma(vma)->mapped ? "" : "un");
+ kfree(comm);
+ }
+ }
seq_puts(m, "\n");
}
- rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
- if (fobj) {
- unsigned int i, shared_count = fobj->shared_count;
-
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- describe_fence(fence, "Shared", m);
- }
- }
-
- fence = rcu_dereference(robj->fence_excl);
- if (fence)
- describe_fence(fence, "Exclusive", m);
- rcu_read_unlock();
-
- mutex_unlock(&msm_obj->lock);
+ dma_resv_describe(robj, m);
+ msm_gem_unlock(obj);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
+ struct msm_gem_stats stats = {};
struct msm_gem_object *msm_obj;
- int count = 0;
- size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n");
- list_for_each_entry(msm_obj, list, mm_list) {
+ list_for_each_entry(msm_obj, list, node) {
struct drm_gem_object *obj = &msm_obj->base;
seq_puts(m, " ");
- msm_gem_describe(obj, m);
- count++;
- size += obj->size;
+ msm_gem_describe(obj, m, &stats);
}
- seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+ seq_printf(m, "Total: %4d objects, %9zu bytes\n",
+ stats.all.count, stats.all.size);
+ seq_printf(m, "Active: %4d objects, %9zu bytes\n",
+ stats.active.count, stats.active.size);
+ seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
+ stats.resident.count, stats.resident.size);
+ seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
+ stats.purgeable.count, stats.purgeable.size);
+ seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
+ stats.purged.count, stats.purged.size);
}
#endif
-/* don't call directly! Use drm_gem_object_put() and friends */
-void msm_gem_free_object(struct drm_gem_object *obj)
+/* don't call directly! Use drm_gem_object_put() */
+static void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
- if (llist_add(&msm_obj->freed, &priv->free_list))
- queue_work(priv->wq, &priv->free_work);
-}
+ mutex_lock(&priv->obj_lock);
+ list_del(&msm_obj->node);
+ mutex_unlock(&priv->obj_lock);
-static void free_object(struct msm_gem_object *msm_obj)
-{
- struct drm_gem_object *obj = &msm_obj->base;
- struct drm_device *dev = obj->dev;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- /* object should not be on active list: */
- WARN_ON(is_active(msm_obj));
-
- list_del(&msm_obj->mm_list);
-
- mutex_lock(&msm_obj->lock);
-
- put_iova(obj);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+ * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case.
+ *
+ * Note that we skip the dance if we aren't attached to any VM. This
+ * is load bearing. The driver needs to support two usage models:
+ *
+ * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
+ * implicitly torn down when the object is freed, the VMA's do
+ * not hold a hard reference to the BO.
+ *
+ * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
+ * BO. This can be dropped when the VM is closed and it's associated
+ * VMAs are torn down. (See msm_gem_vm_close()).
+ *
+ * In the latter case the last reference to a BO can be dropped while
+ * we already have the VM locked. It would have already been removed
+ * from the gpuva list, but lockdep doesn't know that. Or understand
+ * the differences between the two usage models.
+ */
+ if (!list_empty(&obj->gpuva.list)) {
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec,
+ drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
+ put_iova_spaces(obj, NULL, true, "free");
+ drm_exec_fini(&exec); /* drop locks */
+ }
- if (obj->import_attach) {
- if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ if (drm_gem_is_imported(obj)) {
+ GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
- if (msm_obj->pages)
- kvfree(msm_obj->pages);
+ kvfree(msm_obj->pages);
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
- msm_gem_vunmap_locked(obj);
+ msm_gem_vunmap(obj);
put_pages(obj);
}
+ /*
+ * In error paths, we could end up here before msm_gem_new_handle()
+ * has changed obj->resv to point to the shared resv. In this case,
+ * we don't want to drop a ref to the shared r_obj that we haven't
+ * taken yet.
+ */
+ if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
+ struct drm_gem_object *r_obj =
+ container_of(obj->resv, struct drm_gem_object, _resv);
+
+ /* Drop reference we hold to shared resv obj: */
+ drm_gem_object_put(r_obj);
+ }
+
drm_gem_object_release(obj);
- mutex_unlock(&msm_obj->lock);
+ kfree(msm_obj->metadata);
kfree(msm_obj);
}
-void msm_gem_free_work(struct work_struct *work)
+static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- struct msm_drm_private *priv =
- container_of(work, struct msm_drm_private, free_work);
- struct drm_device *dev = priv->dev;
- struct llist_node *freed;
- struct msm_gem_object *msm_obj, *next;
-
- while ((freed = llist_del_all(&priv->free_list))) {
-
- mutex_lock(&dev->struct_mutex);
-
- llist_for_each_entry_safe(msm_obj, next,
- freed, freed)
- free_object(msm_obj);
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_unlock(&dev->struct_mutex);
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
- if (need_resched())
- break;
- }
+ return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle,
+ size_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
@@ -984,29 +1192,74 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (name)
msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) {
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
+
+ drm_gem_object_get(r_obj);
+
+ obj->resv = r_obj->resv;
+ }
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
-static int msm_gem_new_impl(struct drm_device *dev,
- uint32_t size, uint32_t flags,
- struct drm_gem_object **obj,
- bool struct_mutex_locked)
+static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (msm_obj->pages)
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ if (msm_obj->madv == MSM_MADV_DONTNEED)
+ status |= DRM_GEM_OBJECT_PURGEABLE;
+
+ return status;
+}
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+ .free = msm_gem_free_object,
+ .open = msm_gem_open,
+ .close = msm_gem_close,
+ .export = msm_gem_prime_export,
+ .pin = msm_gem_prime_pin,
+ .unpin = msm_gem_prime_unpin,
+ .get_sg_table = msm_gem_prime_get_sg_table,
+ .vmap = msm_gem_prime_vmap,
+ .vunmap = msm_gem_prime_vunmap,
+ .mmap = msm_gem_object_mmap,
+ .status = msm_gem_status,
+ .vm_ops = &vm_ops,
+};
+
+static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
+ struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
- case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
+ case MSM_BO_CACHED_COHERENT:
+ if (priv->has_cached_coherent)
+ break;
+ fallthrough;
default:
- DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
@@ -1015,181 +1268,140 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (!msm_obj)
return -ENOMEM;
- mutex_init(&msm_obj->lock);
-
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- INIT_LIST_HEAD(&msm_obj->submit_entry);
- INIT_LIST_HEAD(&msm_obj->vmas);
-
- if (struct_mutex_locked) {
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- } else {
- mutex_lock(&dev->struct_mutex);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- mutex_unlock(&dev->struct_mutex);
- }
+ INIT_LIST_HEAD(&msm_obj->node);
*obj = &msm_obj->base;
+ (*obj)->funcs = &msm_gem_object_funcs;
return 0;
}
-static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags, bool struct_mutex_locked)
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
- bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
- if (!msm_use_mmu(dev))
- use_vram = true;
- else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
- use_vram = true;
-
- if (WARN_ON(use_vram && !priv->vram.size))
- return ERR_PTR(-EINVAL);
-
/* Disallow zero sized objects as they make the underlying
* infrastructure grumpy
*/
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
+ ret = msm_gem_new_impl(dev, flags, &obj);
if (ret)
- goto fail;
-
- if (use_vram) {
- struct msm_gem_vma *vma;
- struct page **pages;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ return ERR_PTR(ret);
- mutex_lock(&msm_obj->lock);
+ msm_obj = to_msm_bo(obj);
- vma = add_vma(obj, NULL);
- mutex_unlock(&msm_obj->lock);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto fail;
- }
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See comments above new_inode() why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
- to_msm_bo(obj)->vram_node = &vma->node;
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
- drm_gem_private_object_init(dev, obj, size);
+ mutex_lock(&priv->obj_lock);
+ list_add_tail(&msm_obj->node, &priv->objects);
+ mutex_unlock(&priv->obj_lock);
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
-
- vma->iova = physaddr(obj);
- } else {
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
- /*
- * Our buffers are kept pinned, so allocating them from the
- * MOVABLE zone is a really bad idea, and conflicts with CMA.
- * See comments above new_inode() why this is required _and_
- * expected if you're going to pin these pages.
- */
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
return obj;
fail:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
-struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
- uint32_t size, uint32_t flags)
-{
- return _msm_gem_new(dev, size, flags, true);
-}
-
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags)
-{
- return _msm_gem_new(dev, size, flags, false);
-}
-
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
- uint32_t size;
- int ret, npages;
-
- /* if we don't have IOMMU, don't bother pretending we can import: */
- if (!msm_use_mmu(dev)) {
- DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
- return ERR_PTR(-EINVAL);
- }
+ size_t size, npages;
+ int ret;
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
+ ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_gem_private_object_init(dev, obj, size);
npages = size / PAGE_SIZE;
msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
+ msm_gem_lock(obj);
msm_obj->sgt = sgt;
msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!msm_obj->pages) {
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unlock(obj);
ret = -ENOMEM;
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unlock(obj);
goto fail;
}
- mutex_unlock(&msm_obj->lock);
+ msm_gem_unlock(obj);
+
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+
+ mutex_lock(&priv->obj_lock);
+ list_add_tail(&msm_obj->node, &priv->objects);
+ mutex_unlock(&priv->obj_lock);
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
-static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova, bool locked)
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova)
{
void *vaddr;
- struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
int ret;
if (IS_ERR(obj))
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ ret = msm_gem_get_and_pin_iova(obj, vm, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_unpin_iova(obj, aspace);
+ msm_gem_unpin_iova(obj, vm);
ret = PTR_ERR(vaddr);
goto err;
}
@@ -1199,42 +1411,20 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return vaddr;
err:
- if (locked)
- drm_gem_object_put(obj);
- else
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
-{
- return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
-}
-
-void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
-{
- return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
-}
-
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace, bool locked)
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
- msm_gem_unpin_iova(bo, aspace);
-
- if (locked)
- drm_gem_object_put(bo);
- else
- drm_gem_object_put_unlocked(bo);
+ msm_gem_unpin_iova(bo, vm);
+ drm_gem_object_put(bo);
}
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)