Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_uvmm.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_uvmm.c	170
1 file changed, 99 insertions(+), 71 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 5cf892c50f43..eda7bb8624f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -62,6 +62,8 @@ struct bind_job_op {
enum vm_bind_op op;
u32 flags;
+ struct drm_gpuvm_bo *vm_bo;
+
struct {
u64 addr;
u64 range;
@@ -929,25 +931,13 @@ nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
static int
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
- u64 end = addr + range;
- u64 kernel_managed_end = uvmm->kernel_managed_addr +
- uvmm->kernel_managed_size;
-
if (addr & ~PAGE_MASK)
return -EINVAL;
if (range & ~PAGE_MASK)
return -EINVAL;
- if (end <= addr)
- return -EINVAL;
-
- if (addr < NOUVEAU_VA_SPACE_START ||
- end > NOUVEAU_VA_SPACE_END)
- return -EINVAL;
-
- if (addr < kernel_managed_end &&
- end > uvmm->kernel_managed_addr)
+ if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
return -EINVAL;
return 0;
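
The three open-coded checks deleted above — non-empty range without u64 wrap-around, containment in the nouveau VA space, and no overlap with the kernel-managed region — are exactly what the GPUVM core's drm_gpuvm_range_valid() now performs against the bounds handed to drm_gpuvm_init(). A stand-alone sketch of the equivalent logic, reconstructed from the deleted lines (names are illustrative, not GPUVM internals):

	static bool range_valid_sketch(u64 addr, u64 range,
				       u64 va_start, u64 va_end,
				       u64 kernel_addr, u64 kernel_size)
	{
		u64 end = addr + range;
		u64 kernel_end = kernel_addr + kernel_size;

		if (end <= addr)	/* empty range, or addr + range wrapped */
			return false;
		if (addr < va_start || end > va_end)	/* outside the VA space */
			return false;
		if (addr < kernel_end && end > kernel_addr)	/* overlaps reserved region */
			return false;
		return true;
	}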
@@ -1113,22 +1103,28 @@ bind_validate_region(struct nouveau_job *job)
}
static void
-bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+bind_link_gpuvas(struct bind_job_op *bop)
{
+ struct nouveau_uvma_prealloc *new = &bop->new;
+ struct drm_gpuvm_bo *vm_bo = bop->vm_bo;
+ struct drm_gpuva_ops *ops = bop->ops;
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
- drm_gpuva_link(&new->map->va);
+ drm_gpuva_link(&new->map->va, vm_bo);
break;
- case DRM_GPUVA_OP_REMAP:
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva *va = op->remap.unmap->va;
+
if (op->remap.prev)
- drm_gpuva_link(&new->prev->va);
+ drm_gpuva_link(&new->prev->va, va->vm_bo);
if (op->remap.next)
- drm_gpuva_link(&new->next->va);
- drm_gpuva_unlink(op->remap.unmap->va);
+ drm_gpuva_link(&new->next->va, va->vm_bo);
+ drm_gpuva_unlink(va);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
drm_gpuva_unlink(op->unmap.va);
break;
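
drm_gpuva_link() now takes the drm_gpuvm_bo connecting the VM and the GEM object as a second argument instead of deriving the list from the GEM object alone; in the remap case the unmapped mapping's va->vm_bo is reused, since the split halves keep mapping the same (VM, BO) pair. A minimal usage sketch, assuming uvma is a freshly allocated mapping and vm_bo was obtained earlier (link is expected to take its own vm_bo reference, unlink to drop one):

	/* Illustrative link/unlink pairing for a single mapping. */
	drm_gpuva_link(&uvma->va, vm_bo);	/* insert VA into vm_bo's VA list */
	/* ... later, when the mapping is torn down ... */
	drm_gpuva_unlink(&uvma->va);		/* remove it again on unmap */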
@@ -1150,10 +1146,17 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_MAP) {
- op->gem.obj = drm_gem_object_lookup(job->file_priv,
- op->gem.handle);
- if (!op->gem.obj)
+ struct drm_gem_object *obj = op->gem.obj =
+ drm_gem_object_lookup(job->file_priv,
+ op->gem.handle);
+ if (!obj)
return -ENOENT;
+
+ dma_resv_lock(obj->resv, NULL);
+ op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(op->vm_bo))
+ return PTR_ERR(op->vm_bo);
}
ret = bind_validate_op(job, op);
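
drm_gpuvm_bo_obtain() returns the existing drm_gpuvm_bo for this (VM, GEM) pair with an additional reference, or creates one if none exists yet; errors come back as an ERR_PTR, hence the IS_ERR() check. Nouveau serializes the GEM's gpuva list with the object's dma-resv lock, which is why the call sits inside a lock/unlock pair. A sketch of the full obtain/put pairing this patch establishes (the put side is the free-worker hunk further down):

	/* Obtain: first user creates the vm_bo, later users get a reference. */
	dma_resv_lock(obj->resv, NULL);
	vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
	dma_resv_unlock(obj->resv);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	/* Put: drop the reference once the bind job is torn down. */
	dma_resv_lock(obj->resv, NULL);
	drm_gpuvm_bo_put(vm_bo);
	dma_resv_unlock(obj->resv);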
@@ -1364,7 +1367,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
case OP_UNMAP_SPARSE:
case OP_MAP:
case OP_UNMAP:
- bind_link_gpuvas(op->ops, &op->new);
+ bind_link_gpuvas(op);
break;
default:
break;
@@ -1511,6 +1514,12 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
if (!IS_ERR_OR_NULL(op->ops))
drm_gpuva_ops_free(&uvmm->base, op->ops);
+ if (!IS_ERR_OR_NULL(op->vm_bo)) {
+ dma_resv_lock(obj->resv, NULL);
+ drm_gpuvm_bo_put(op->vm_bo);
+ dma_resv_unlock(obj->resv);
+ }
+
if (obj)
drm_gem_object_put(obj);
}
@@ -1648,18 +1657,6 @@ err_free:
return ret;
}
-int
-nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct drm_nouveau_vm_init *init = data;
-
- return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
- init->kernel_managed_size);
-}
-
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{
@@ -1776,15 +1773,18 @@ void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_map(uvma, mem);
- drm_gpuva_invalidate(va, false);
+ nouveau_uvma_map(uvma, mem);
+ drm_gpuva_invalidate(va, false);
+ }
}
}
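
With mappings tracked per (VM, BO) pair, enumerating every VA of a GEM object becomes a two-level walk: an outer loop over the object's drm_gpuvm_bos and an inner loop over each vm_bo's VAs, replacing the deleted flat drm_gem_for_each_gpuva() iteration here and in nouveau_uvmm_bo_unmap_all() below. A generic sketch of the pattern, with handle_va() as a hypothetical per-mapping hook:

	/* Visit every VA mapping obj, across all VMs it is mapped into. */
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;

	dma_resv_assert_held(obj->resv);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)		/* each (VM, BO) pair */
		drm_gpuvm_bo_for_each_va(va, vm_bo)	/* each VA in that pair */
			handle_va(va);			/* hypothetical callback */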
@@ -1792,29 +1792,53 @@ void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_unmap(uvma);
- drm_gpuva_invalidate(va, true);
+ nouveau_uvma_unmap(uvma);
+ drm_gpuva_invalidate(va, true);
+ }
}
}
+static void
+nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
+{
+ struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
+
+ kfree(uvmm);
+}
+
+static const struct drm_gpuvm_ops gpuvm_ops = {
+ .vm_free = nouveau_uvmm_free,
+};
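
struct drm_gpuvm is now reference counted: drm_gpuvm_init() establishes the initial reference, and the vm_free hook wired up above runs when the last drm_gpuvm_put() drops it. That is why the uvmm becomes a separate kzalloc()'d allocation released in nouveau_uvmm_free(), and why the error path and nouveau_uvmm_fini() below call drm_gpuvm_put() where drm_gpuvm_destroy() used to be. A condensed sketch of the lifetime, mirroring the init hunk that follows:

	/* Allocation and init hold the initial VM reference ... */
	uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
	if (!uvmm)
		return -ENOMEM;
	drm_gpuvm_init(&uvmm->base, name, 0, drm, r_obj,
		       va_start, va_range, kernel_addr, kernel_size,
		       &gpuvm_ops);

	/* ... and the final put ends up in gpuvm_ops.vm_free -> kfree(uvmm). */
	drm_gpuvm_put(&uvmm->base);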
+
int
-nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
- u64 kernel_managed_addr, u64 kernel_managed_size)
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
{
+ struct nouveau_uvmm *uvmm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_device *drm = cli->drm->dev;
+ struct drm_gem_object *r_obj;
+ struct drm_nouveau_vm_init *init = data;
+ u64 kernel_managed_end;
int ret;
- u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
- mutex_init(&uvmm->mutex);
- dma_resv_init(&uvmm->resv);
- mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
- mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+ if (check_add_overflow(init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &kernel_managed_end))
+ return -EINVAL;
+
+ if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
+ return -EINVAL;
mutex_lock(&cli->mutex);
@@ -1823,39 +1847,48 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
goto out_unlock;
}
- if (kernel_managed_end <= kernel_managed_addr) {
- ret = -EINVAL;
+ uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
+ if (!uvmm) {
+ ret = -ENOMEM;
goto out_unlock;
}
- if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
- ret = -EINVAL;
+ r_obj = drm_gpuvm_resv_object_alloc(drm);
+ if (!r_obj) {
+ kfree(uvmm);
+ ret = -ENOMEM;
goto out_unlock;
}
- uvmm->kernel_managed_addr = kernel_managed_addr;
- uvmm->kernel_managed_size = kernel_managed_size;
+ mutex_init(&uvmm->mutex);
+ mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
- drm_gpuvm_init(&uvmm->base, cli->name,
+ drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
NOUVEAU_VA_SPACE_START,
NOUVEAU_VA_SPACE_END,
- kernel_managed_addr, kernel_managed_size,
- NULL);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &gpuvm_ops);
+ /* GPUVM takes care from here on. */
+ drm_gem_object_put(r_obj);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
- kernel_managed_addr, kernel_managed_size,
- NULL, 0, &cli->uvmm.vmm.vmm);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ NULL, 0, &uvmm->vmm.vmm);
if (ret)
- goto out_free_gpuva_mgr;
+ goto out_gpuvm_fini;
- cli->uvmm.vmm.cli = cli;
+ uvmm->vmm.cli = cli;
+ cli->uvmm.ptr = uvmm;
mutex_unlock(&cli->mutex);
return 0;
-out_free_gpuva_mgr:
- drm_gpuvm_destroy(&uvmm->base);
+out_gpuvm_fini:
+ drm_gpuvm_put(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
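
The GEM object allocated with drm_gpuvm_resv_object_alloc() carries no backing storage; it exists so the VM and the objects mapped into it can share a common dma-resv. drm_gpuvm_init() is expected to take its own reference on r_obj (the "GPUVM takes care from here on" comment above), so the local reference is dropped right after init and the object lives exactly as long as the VM. A sketch of the ownership hand-off, under that assumption:

	/* The resv object is handed over to the VM at init time. */
	r_obj = drm_gpuvm_resv_object_alloc(drm);	/* refcount 1, held locally */
	if (!r_obj)
		return -ENOMEM;

	drm_gpuvm_init(&gpuvm, name, 0, drm, r_obj, va_start, va_range,
		       kernel_addr, kernel_size, &ops);	/* VM takes its own ref */
	drm_gem_object_put(r_obj);	/* drop the local ref; the VM keeps r_obj alive */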
@@ -1870,9 +1903,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
- if (!cli)
- return;
-
rmb(); /* for list_empty to work without lock */
wait_event(entity->job.wq, list_empty(&entity->job.list.head));
@@ -1910,8 +1940,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuvm_destroy(&uvmm->base);
+ drm_gpuvm_put(&uvmm->base);
mutex_unlock(&cli->mutex);
-
- dma_resv_fini(&uvmm->resv);
}