Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  827
1 file changed, 487 insertions(+), 340 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 1dee0d18abbb..017411a0bf45 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -9,30 +9,108 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+
+#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
-#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
-#include "msm_mmu.h"
+#include "msm_kms.h"
-static void update_lru(struct drm_gem_object *obj);
+static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
+{
+ uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
+ trace_gpu_mem_total(0, 0, total_mem);
+}
-static dma_addr_t physaddr(struct drm_gem_object *obj)
+static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
- priv->vram.paddr;
+ struct msm_context *ctx = file->driver_priv;
+ uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
+
+ rcu_read_lock(); /* Locks file->pid! */
+ trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
+ rcu_read_unlock();
+
}
-static bool use_pages(struct drm_gem_object *obj)
+static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- return !msm_obj->vram_node;
+ msm_gem_vma_get(obj);
+ update_ctx_mem(file, obj->size);
+ return 0;
+}
+
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason);
+
+static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
+
+ update_ctx_mem(file, -obj->size);
+ msm_gem_vma_put(obj);
+
+ /*
+ * If VM isn't created yet, nothing to cleanup. And in fact calling
+ * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
+ * down the mappings of shared buffers in other contexts.
+ */
+ if (!ctx->vm)
+ return;
+
+ /*
+ * VM_BIND does not depend on implicit teardown of VMAs on handle
+ * close, but instead on implicit teardown of the VM when the device
+ * is closed (see msm_gem_vm_close())
+ */
+ if (msm_context_is_vmbind(ctx))
+ return;
+
+ /*
+ * TODO we might need to kick this to a queue to avoid blocking
+ * in CLOSE ioctl
+ */
+ dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
+ put_iova_spaces(obj, ctx->vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
+}
+
+/*
+ * Get/put for kms->vm VMA
+ */
+
+void msm_gem_vma_get(struct drm_gem_object *obj)
+{
+ atomic_inc(&to_msm_bo(obj)->vma_ref);
+}
+
+void msm_gem_vma_put(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
+ return;
+
+ if (!priv->kms)
+ return;
+
+#ifdef CONFIG_DRM_MSM_KMS
+ struct drm_exec exec;
+
+ msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
+ put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
+ drm_exec_fini(&exec); /* drop locks */
+#endif
}
/*
@@ -63,34 +141,47 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
-/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
+static void update_lru_active(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
- dma_addr_t paddr;
- struct page **p;
- int ret, i;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
- p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
+ GEM_WARN_ON(!msm_obj->pages);
- spin_lock(&priv->vram.lock);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
- spin_unlock(&priv->vram.lock);
- if (ret) {
- kvfree(p);
- return ERR_PTR(ret);
+ if (msm_obj->pin_count) {
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
}
+}
+
+static void update_lru_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(&msm_obj->base);
- paddr = physaddr(obj);
- for (i = 0; i < npages; i++) {
- p[i] = pfn_to_page(__phys_to_pfn(paddr));
- paddr += PAGE_SIZE;
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+
+ drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
+ } else {
+ update_lru_active(obj);
}
+}
- return p;
+static void update_lru(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ mutex_lock(&priv->lru.lock);
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
}
static struct page **get_pages(struct drm_gem_object *obj)
@@ -102,12 +193,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
- if (use_pages(obj))
- p = drm_gem_get_pages(obj);
- else
- p = get_pages_vram(obj, npages);
+ p = drm_gem_get_pages(obj);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
@@ -115,6 +203,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
+ update_device_mem(dev->dev_private, obj->size);
+
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
@@ -138,22 +228,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return msm_obj->pages;
}
-static void put_pages_vram(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
-
- spin_lock(&priv->vram.lock);
- drm_mm_remove_node(msm_obj->vram_node);
- spin_unlock(&priv->vram.lock);
-
- kvfree(msm_obj->pages);
-}
-
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /*
+ * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
+ * See explanation in msm_gem_assert_locked()
+ */
+ if (kref_read(&obj->refcount))
+ drm_gpuvm_bo_gem_evict(obj, true);
+
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
@@ -168,52 +253,70 @@ static void put_pages(struct drm_gem_object *obj)
msm_obj->sgt = NULL;
}
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
- put_pages_vram(obj);
+ update_device_mem(obj->dev->dev_private, -obj->size);
+
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
msm_obj->pages = NULL;
update_lru(obj);
}
}
-static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
+struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **p;
msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ if (msm_obj->madv > madv) {
+ DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
- p = get_pages(obj);
- if (!IS_ERR(p)) {
- to_msm_bo(obj)->pin_count++;
- update_lru(obj);
- }
+ return get_pages(obj);
+}
- return p;
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ msm_gem_assert_locked(obj);
+
+ to_msm_bo(obj)->pin_count++;
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+}
+
+static void pin_obj_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ mutex_lock(&priv->lru.lock);
+ msm_gem_pin_obj_locked(obj);
+ mutex_unlock(&priv->lru.lock);
}
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct page **p;
- msm_gem_lock(obj);
- p = msm_gem_pin_pages_locked(obj);
- msm_gem_unlock(obj);
+ msm_gem_assert_locked(obj);
+
+ p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (!IS_ERR(p))
+ pin_obj_locked(obj);
return p;
}
-void msm_gem_unpin_pages(struct drm_gem_object *obj)
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
+
msm_gem_unpin_locked(obj);
- msm_gem_unlock(obj);
}
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
@@ -301,50 +404,31 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
-static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo;
msm_gem_assert_locked(obj);
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- vma->aspace = aspace;
-
- list_add_tail(&vma->list, &msm_obj->vmas);
-
- return vma;
-}
-
-static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
- msm_gem_assert_locked(obj);
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm == vm) {
+ /* lookup_vma() should only be used in paths
+ * with at most one vma per vm
+ */
+ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace == aspace)
- return vma;
+ return vma;
+ }
+ }
}
return NULL;
}
-static void del_vma(struct msm_gem_vma *vma)
-{
- if (!vma)
- return;
-
- list_del(&vma->list);
- kfree(vma);
-}
-
/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
@@ -352,72 +436,55 @@ static void del_vma(struct msm_gem_vma *vma)
* mapping.
*/
static void
-put_iova_spaces(struct drm_gem_object *obj, bool close)
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ bool close, const char *reason)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo, *tmp;
msm_gem_assert_locked(obj);
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace) {
- msm_gem_purge_vma(vma->aspace, vma);
- if (close)
- msm_gem_close_vma(vma->aspace, vma);
- }
- }
-}
+ drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
+ struct drm_gpuva *vma, *vmatmp;
-/* Called with msm_obj locked */
-static void
-put_iova_vmas(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
+ if (vm && vm_bo->vm != vm)
+ continue;
- msm_gem_assert_locked(obj);
+ drm_gpuvm_bo_get(vm_bo);
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- del_vma(vma);
+ drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
+ msm_gem_vma_unmap(vma, reason);
+ if (close)
+ msm_gem_vma_close(vma);
+ }
+
+ drm_gpuvm_bo_put(vm_bo);
}
}
-static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace,
- u64 range_start, u64 range_end)
+static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, u64 range_start,
+ u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
msm_gem_assert_locked(obj);
- vma = lookup_vma(obj, aspace);
+ vma = lookup_vma(obj, vm);
if (!vma) {
- int ret;
-
- vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return vma;
-
- ret = msm_gem_init_vma(aspace, vma, obj->size,
- range_start, range_end);
- if (ret) {
- del_vma(vma);
- return ERR_PTR(ret);
- }
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
- GEM_WARN_ON(vma->iova < range_start);
- GEM_WARN_ON((vma->iova + obj->size) > range_end);
+ GEM_WARN_ON(vma->va.addr < range_start);
+ GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
return vma;
}
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_prot(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **pages;
- int ret, prot = IOMMU_READ;
+ int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
@@ -428,56 +495,80 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
- msm_gem_assert_locked(obj);
+ return prot;
+}
- if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
- return -EBUSY;
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ int prot = msm_gem_prot(obj);
- pages = msm_gem_pin_pages_locked(obj);
+ msm_gem_assert_locked(obj);
+
+ pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
- ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
- if (ret)
- msm_gem_unpin_locked(obj);
-
- return ret;
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
+ mutex_lock(&priv->lru.lock);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+}
- update_lru(obj);
+/* Special unpin path for use in fence-signaling path, avoiding the need
+ * to hold the obj lock by only depending on things that a protected by
+ * the LRU lock. In particular we know that that we already have backing
+ * and and that the object's dma_resv has the fence for the current
+ * submit/job which will prevent us racing against page eviction.
+ */
+void msm_gem_unpin_active(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_lru_active(obj);
}
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
- return get_vma_locked(obj, aspace, 0, U64_MAX);
+ return get_vma_locked(obj, vm, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
int ret;
msm_gem_assert_locked(obj);
- vma = get_vma_locked(obj, aspace, range_start, range_end);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
+ vma = get_vma_locked(obj, vm, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
- if (!ret)
- *iova = vma->iova;
+ if (!ret) {
+ *iova = vma->va.addr;
+ pin_obj_locked(obj);
+ }
return ret;
}
@@ -487,61 +578,59 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
* limits iova to specified range (in pages)
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
+ struct drm_exec exec;
int ret;
- msm_gem_lock(obj);
- ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
- msm_gem_unlock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+ return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
- vma = get_vma_locked(obj, aspace, 0, U64_MAX);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
- *iova = vma->iova;
+ *iova = vma->va.addr;
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
static int clear_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+ struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma = lookup_vma(obj, aspace);
+ struct drm_gpuva *vma = lookup_vma(obj, vm);
if (!vma)
return 0;
- if (msm_gem_vma_inuse(vma))
- return -EBUSY;
-
- msm_gem_purge_vma(vma->aspace, vma);
- msm_gem_close_vma(vma->aspace, vma);
- del_vma(vma);
+ msm_gem_vma_unmap(vma, NULL);
+ msm_gem_vma_close(vma);
return 0;
}
@@ -554,52 +643,89 @@ static int clear_iova(struct drm_gem_object *obj,
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t iova)
+ struct drm_gpuvm *vm, uint64_t iova)
{
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
- ret = clear_iova(obj, aspace);
+ ret = clear_iova(obj, vm);
} else {
- struct msm_gem_vma *vma;
- vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+ struct drm_gpuva *vma;
+ vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- } else if (GEM_WARN_ON(vma->iova != iova)) {
- clear_iova(obj, aspace);
+ } else if (GEM_WARN_ON(vma->va.addr != iova)) {
+ clear_iova(obj, vm);
ret = -EBUSY;
}
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
+static bool is_kms_vm(struct drm_gpuvm *vm)
+{
+#ifdef CONFIG_DRM_MSM_KMS
+ struct msm_drm_private *priv = vm->drm->dev_private;
+
+ return priv->kms && (priv->kms->vm == vm);
+#else
+ return false;
+#endif
+}
+
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
+ struct drm_exec exec;
- msm_gem_lock(obj);
- vma = lookup_vma(obj, aspace);
- if (!GEM_WARN_ON(!vma)) {
- msm_gem_unpin_vma(vma);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ vma = lookup_vma(obj, vm);
+ if (vma) {
msm_gem_unpin_locked(obj);
}
- msm_gem_unlock(obj);
+ if (!is_kms_vm(vm))
+ put_iova_spaces(obj, vm, true, "close");
+ drm_exec_fini(&exec); /* drop locks */
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- args->pitch = align_pitch(args->width, args->bpp);
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ u32 fourcc;
+ u64 pitch_align;
+ int ret;
+
+ /*
+ * Adreno needs pitch aligned to 32 pixels. Compute the number
+ * of bytes for a block of 32 pixels at the given color format.
+ * Use the result as pitch alignment.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info;
+
+ info = drm_format_info(fourcc);
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, 32);
+ } else {
+ pitch_align = round_up(args->width, 32) * DIV_ROUND_UP(args->bpp, SZ_8);
+ }
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
+ if (ret)
+ return ret;
+
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
@@ -628,18 +754,19 @@ fail:
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
int ret = 0;
msm_gem_assert_locked(obj);
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return ERR_PTR(-ENODEV);
- if (GEM_WARN_ON(msm_obj->madv > madv)) {
- DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
- return ERR_PTR(-EBUSY);
- }
+ pages = msm_gem_get_pages_locked(obj, madv);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+
+ pin_obj_locked(obj);
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
@@ -650,25 +777,19 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
msm_obj->vmap_count++;
if (!msm_obj->vaddr) {
- struct page **pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
}
-
- update_lru(obj);
}
return msm_obj->vaddr;
fail:
msm_obj->vmap_count--;
+ msm_gem_unpin_locked(obj);
return ERR_PTR(ret);
}
@@ -707,6 +828,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
+ msm_gem_unpin_locked(obj);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
@@ -721,10 +843,13 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj)
*/
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
+ mutex_lock(&priv->lru.lock);
+
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
@@ -733,7 +858,9 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
- update_lru(obj);
+ update_lru_locked(obj);
+
+ mutex_unlock(&priv->lru.lock);
msm_gem_unlock(obj);
@@ -743,13 +870,14 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
void msm_gem_purge(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, true);
+ put_iova_spaces(obj, NULL, false, "purge");
msm_gem_vunmap(obj);
@@ -757,9 +885,10 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_pages(obj);
- put_iova_vmas(obj);
-
+ mutex_lock(&priv->lru.lock);
+ /* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
+ mutex_unlock(&priv->lru.lock);
drm_gem_free_mmap_offset(obj);
@@ -786,7 +915,7 @@ void msm_gem_evict(struct drm_gem_object *obj)
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, false);
+ put_iova_spaces(obj, NULL, false, "evict");
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
@@ -806,29 +935,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-static void update_lru(struct drm_gem_object *obj)
-{
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- msm_gem_assert_locked(&msm_obj->base);
-
- if (!msm_obj->pages) {
- GEM_WARN_ON(msm_obj->pin_count);
- GEM_WARN_ON(msm_obj->vmap_count);
-
- drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
- } else if (msm_obj->pin_count || msm_obj->vmap_count) {
- drm_gem_lru_move_tail(&priv->lru.pinned, obj);
- } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
- drm_gem_lru_move_tail(&priv->lru.willneed, obj);
- } else {
- GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
-
- drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
- }
-}
-
bool msm_gem_active(struct drm_gem_object *obj)
{
msm_gem_assert_locked(obj);
@@ -836,7 +942,7 @@ bool msm_gem_active(struct drm_gem_object *obj)
if (to_msm_bo(obj)->pin_count)
return true;
- return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
+ return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -846,6 +952,11 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
+ if (op & MSM_PREP_BOOST) {
+ dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
+ ktime_get());
+ }
+
ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
true, remain);
if (ret == 0)
@@ -870,11 +981,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
- msm_gem_lock(obj);
+ if (!msm_gem_trylock(obj))
+ return;
stats->all.count++;
stats->all.size += obj->size;
@@ -913,32 +1024,33 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
+ if (!list_empty(&obj->gpuva.list)) {
+ struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- const char *name, *comm;
- if (vma->aspace) {
- struct msm_gem_address_space *aspace = vma->aspace;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ const char *name, *comm;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct task_struct *task =
- get_pid_task(aspace->pid, PIDTYPE_PID);
+ get_pid_task(vm->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}
- name = aspace->name;
- } else {
- name = comm = NULL;
+ name = vm->base.name;
+
+ seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->vm, vma->va.addr,
+ to_msm_vma(vma)->mapped ? "" : "un");
+ kfree(comm);
}
- seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
- name, comm ? ":" : "", comm ? comm : "",
- vma->aspace, vma->iova,
- vma->mapped ? "mapped" : "unmapped",
- msm_gem_vma_inuse(vma));
- kfree(comm);
}
seq_puts(m, "\n");
@@ -979,14 +1091,48 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- put_iova_spaces(obj, true);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+ * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case.
+ *
+ * Note that we skip the dance if we aren't attached to any VM. This
+ * is load bearing. The driver needs to support two usage models:
+ *
+ * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
+ * implicitly torn down when the object is freed, the VMA's do
+ * not hold a hard reference to the BO.
+ *
+ * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
+ * BO. This can be dropped when the VM is closed and it's associated
+ * VMAs are torn down. (See msm_gem_vm_close()).
+ *
+ * In the latter case the last reference to a BO can be dropped while
+ * we already have the VM locked. It would have already been removed
+ * from the gpuva list, but lockdep doesn't know that. Or understand
+ * the differences between the two usage models.
+ */
+ if (!list_empty(&obj->gpuva.list)) {
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec,
+ drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
+ put_iova_spaces(obj, NULL, true, "free");
+ drm_exec_fini(&exec); /* drop locks */
+ }
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
@@ -994,17 +1140,29 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
*/
kvfree(msm_obj->pages);
- put_iova_vmas(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
- put_iova_vmas(obj);
+ }
+
+ /*
+ * In error paths, we could end up here before msm_gem_new_handle()
+ * has changed obj->resv to point to the shared resv. In this case,
+ * we don't want to drop a ref to the shared r_obj that we haven't
+ * taken yet.
+ */
+ if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
+ struct drm_gem_object *r_obj =
+ container_of(obj->resv, struct drm_gem_object, _resv);
+
+ /* Drop reference we hold to shared resv obj: */
+ drm_gem_object_put(r_obj);
}
drm_gem_object_release(obj);
+ kfree(msm_obj->metadata);
kfree(msm_obj);
}
@@ -1012,7 +1170,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
@@ -1020,7 +1178,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle,
+ size_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
@@ -1034,6 +1192,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (name)
msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) {
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
+
+ drm_gem_object_get(r_obj);
+
+ obj->resv = r_obj->resv;
+ }
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -1042,6 +1209,20 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
+static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (msm_obj->pages)
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ if (msm_obj->madv == MSM_MADV_DONTNEED)
+ status |= DRM_GEM_OBJECT_PURGEABLE;
+
+ return status;
+}
+
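The new .status callback is what lets the generic DRM fdinfo helpers classify a client's BOs as resident and/or purgeable. A minimal sketch of the consumer side, assuming the common drm_show_memory_stats() helper (the function name below is illustrative, not msm's actual fdinfo hook):

	static void example_show_fdinfo(struct drm_printer *p, struct drm_file *file)
	{
		/* Walks the file's GEM handles, calls obj->funcs->status() on each
		 * and prints the drm-resident-memory / drm-purgeable-memory keys.
		 */
		drm_show_memory_stats(p, file);
	}
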
static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
@@ -1050,18 +1231,21 @@ static const struct vm_operations_struct vm_ops = {
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
+ .open = msm_gem_open,
+ .close = msm_gem_close,
+ .export = msm_gem_prime_export,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
.mmap = msm_gem_object_mmap,
+ .status = msm_gem_status,
.vm_ops = &vm_ops,
};
-static int msm_gem_new_impl(struct drm_device *dev,
- uint32_t size, uint32_t flags,
- struct drm_gem_object **obj)
+static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
+ struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1088,7 +1272,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
- INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
@@ -1096,74 +1279,37 @@ static int msm_gem_new_impl(struct drm_device *dev,
return 0;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
- bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
- if (!msm_use_mmu(dev))
- use_vram = true;
- else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
- use_vram = true;
-
- if (GEM_WARN_ON(use_vram && !priv->vram.size))
- return ERR_PTR(-EINVAL);
-
/* Disallow zero sized objects as they make the underlying
* infrastructure grumpy
*/
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
+ ret = msm_gem_new_impl(dev, flags, &obj);
if (ret)
return ERR_PTR(ret);
msm_obj = to_msm_bo(obj);
- if (use_vram) {
- struct msm_gem_vma *vma;
- struct page **pages;
-
- drm_gem_private_object_init(dev, obj, size);
-
- msm_gem_lock(obj);
-
- vma = add_vma(obj, NULL);
- msm_gem_unlock(obj);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto fail;
- }
-
- to_msm_bo(obj)->vram_node = &vma->node;
-
- msm_gem_lock(obj);
- pages = get_pages(obj);
- msm_gem_unlock(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
-
- vma->iova = physaddr(obj);
- } else {
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
- /*
- * Our buffers are kept pinned, so allocating them from the
- * MOVABLE zone is a really bad idea, and conflicts with CMA.
- * See comments above new_inode() why this is required _and_
- * expected if you're going to pin these pages.
- */
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
- }
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See comments above new_inode() why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
@@ -1171,6 +1317,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
@@ -1184,18 +1334,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
- uint32_t size;
- int ret, npages;
-
- /* if we don't have IOMMU, don't bother pretending we can import: */
- if (!msm_use_mmu(dev)) {
- DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
- return ERR_PTR(-EINVAL);
- }
+ size_t size, npages;
+ int ret;
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1227,6 +1371,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
@@ -1234,9 +1382,9 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
@@ -1246,14 +1394,14 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ ret = msm_gem_get_and_pin_iova(obj, vm, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_unpin_iova(obj, aspace);
+ msm_gem_unpin_iova(obj, vm);
ret = PTR_ERR(vaddr);
goto err;
}
@@ -1269,14 +1417,13 @@ err:
}
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace)
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
- msm_gem_unpin_iova(bo, aspace);
+ msm_gem_unpin_iova(bo, vm);
drm_gem_object_put(bo);
}