Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_uvmm.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c  534
1 file changed, 311 insertions, 223 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index aae780e4a4aa..f10809115c56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -62,6 +62,8 @@ struct bind_job_op {
enum vm_bind_op op;
u32 flags;
+ struct drm_gpuvm_bo *vm_bo;
+
struct {
u64 addr;
u64 range;
@@ -105,34 +107,34 @@ nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
static int
nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range)
+ u64 addr, u64 range, u8 page_shift)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
- return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
+ return nvif_vmm_raw_get(vmm, addr, range, page_shift);
}
static int
nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range)
+ u64 addr, u64 range, u8 page_shift)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
- return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
+ return nvif_vmm_raw_put(vmm, addr, range, page_shift);
}
static int
nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range, bool sparse)
+ u64 addr, u64 range, u8 page_shift, bool sparse)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
- return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
+ return nvif_vmm_raw_unmap(vmm, addr, range, page_shift, sparse);
}
static int
nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range,
+ u64 addr, u64 range, u8 page_shift,
u64 bo_offset, u8 kind,
struct nouveau_mem *mem)
{
@@ -161,7 +163,7 @@ nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
return -ENOSYS;
}
- return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
+ return nvif_vmm_raw_map(vmm, addr, range, page_shift,
&args, argc,
&mem->mem, bo_offset);
}
@@ -180,8 +182,9 @@ nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
{
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
+ u8 page_shift = uvma->page_shift;
- return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
+ return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range, page_shift);
}
static int
@@ -191,9 +194,11 @@ nouveau_uvma_map(struct nouveau_uvma *uvma,
u64 addr = uvma->va.va.addr;
u64 offset = uvma->va.gem.offset;
u64 range = uvma->va.va.range;
+ u8 page_shift = uvma->page_shift;
return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
- offset, uvma->kind, mem);
+ page_shift, offset, uvma->kind,
+ mem);
}
static int
@@ -201,12 +206,13 @@ nouveau_uvma_unmap(struct nouveau_uvma *uvma)
{
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
+ u8 page_shift = uvma->page_shift;
bool sparse = !!uvma->region;
if (drm_gpuva_invalidated(&uvma->va))
return 0;
- return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+ return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
}
static int
@@ -329,7 +335,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_region *reg;
int ret;
- if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
+ if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
return -ENOSPC;
ret = nouveau_uvma_region_alloc(&reg);
@@ -384,7 +390,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
{
struct nouveau_uvmm *uvmm = reg->uvmm;
- return drm_gpuva_interval_empty(&uvmm->umgr,
+ return drm_gpuvm_interval_empty(&uvmm->base,
reg->va.addr,
reg->va.range);
}
@@ -436,15 +442,72 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
static void
op_map_prepare_unwind(struct nouveau_uvma *uvma)
{
+ struct drm_gpuva *va = &uvma->va;
nouveau_uvma_gem_put(uvma);
- drm_gpuva_remove(&uvma->va);
+ drm_gpuva_remove(va);
nouveau_uvma_free(uvma);
}
static void
op_unmap_prepare_unwind(struct drm_gpuva *va)
{
- drm_gpuva_insert(va->mgr, va);
+ drm_gpuva_insert(va->vm, va);
+}
+
+static bool
+op_map_aligned_to_page_shift(const struct drm_gpuva_op_map *op, u8 page_shift)
+{
+ u64 non_page_bits = (1ULL << page_shift) - 1;
+
+ return (op->va.addr & non_page_bits) == 0 &&
+ (op->va.range & non_page_bits) == 0 &&
+ (op->gem.offset & non_page_bits) == 0;
+}
+
+static u8
+select_page_shift(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *op)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(op->gem.obj);
+
+ /* nouveau_bo_fixup_align() guarantees that the page size is aligned for
+ * most cases, but it can't handle userspace allocating a BO with one size
+ * and then binding it with a smaller granularity. To avoid breaking such
+ * (old) userspace, make sure the VA, range and offset are actually aligned
+ * to the BO's preferred page size before using it; if they aren't,
+ * downgrade to the first granularity that fits, which is the best choice
+ * for both correctness and performance.
+ */
+ if (op_map_aligned_to_page_shift(op, nvbo->page))
+ return nvbo->page;
+
+ struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+ int i;
+
+ /* The BO's preferred granularity doesn't fit; find one that does. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Skip anything at or above the BO's preferred page size. */
+ if (vmm->page[i].shift >= nvbo->page)
+ continue;
+
+ /* Skip incompatible domains. */
+ if ((mem->mem.type & NVIF_MEM_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((mem->mem.type & NVIF_MEM_HOST) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
+
+ /* If it fits, return the proposed shift. */
+ if (op_map_aligned_to_page_shift(op, vmm->page[i].shift))
+ return vmm->page[i].shift;
+ }
+
+ /* If we get here, no supported page size satisfies both the alignment and
+ * the memory type requirements. This should never happen.
+ */
+ drm_WARN_ONCE(op->gem.obj->dev, 1, "Could not find an appropriate page size.\n");
+
+ return PAGE_SHIFT;
}
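
To make the alignment rule concrete, here is a small self-contained sketch (not part of the patch) of the same mask check op_map_aligned_to_page_shift() performs: a page shift of 16 (64 KiB pages) requires the low 16 bits of addr, range and offset to be clear, otherwise select_page_shift() has to fall back to a smaller shift such as 12 (4 KiB).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as op_map_aligned_to_page_shift(), on plain integers. */
static bool aligned_to_shift(uint64_t addr, uint64_t range, uint64_t offset,
			     unsigned int shift)
{
	uint64_t mask = (1ULL << shift) - 1;

	return (addr & mask) == 0 && (range & mask) == 0 && (offset & mask) == 0;
}

int main(void)
{
	/* Fully 64 KiB aligned: shift 16 is usable. */
	printf("%d\n", aligned_to_shift(0x100000000ULL, 0x20000, 0, 16));      /* 1 */
	/* Same VA, but bound at a 4 KiB offset: shift 16 fails ... */
	printf("%d\n", aligned_to_shift(0x100000000ULL, 0x20000, 0x1000, 16)); /* 0 */
	/* ... and the selection loop falls back to shift 12. */
	printf("%d\n", aligned_to_shift(0x100000000ULL, 0x20000, 0x1000, 12)); /* 1 */
	return 0;
}
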
static void
@@ -466,6 +529,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
break;
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
if (r->next)
op_map_prepare_unwind(new->next);
@@ -473,7 +537,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (r->prev)
op_map_prepare_unwind(new->prev);
- op_unmap_prepare_unwind(r->unmap->va);
+ op_unmap_prepare_unwind(va);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -497,7 +561,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (vmm_get_range)
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range,
+ select_page_shift(uvmm, &op->map));
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -524,6 +589,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
+ u8 page_shift = uvma_from_va(va)->page_shift;
/* Nothing to do for mappings we merge with. */
if (uend == vmm_get_start ||
@@ -534,7 +600,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
u64 vmm_get_range = ustart - vmm_get_start;
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range,
+ page_shift);
}
vmm_get_start = uend;
break;
@@ -588,8 +655,9 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
uvma->region = args->region;
uvma->kind = args->kind;
+ uvma->page_shift = select_page_shift(uvmm, op);
- drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+ drm_gpuva_map(&uvmm->base, &uvma->va, op);
/* Keep a reference until this uvma is destroyed. */
nouveau_uvma_gem_get(uvma);
@@ -604,6 +672,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u)
drm_gpuva_unmap(u);
}
+/*
+ * Note: @args should not be NULL when calling for a map operation.
+ */
static int
nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
@@ -624,14 +695,16 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
if (ret)
goto unwind;
- if (args && vmm_get_range) {
+ if (vmm_get_range) {
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range,
+ new->map->page_shift);
if (ret) {
op_map_prepare_unwind(new->map);
goto unwind;
}
}
+
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -681,6 +754,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
+ u8 page_shift = uvma_from_va(va)->page_shift;
op_unmap_prepare(u);
@@ -696,7 +770,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
u64 vmm_get_range = ustart - vmm_get_start;
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range, page_shift);
if (ret) {
op_unmap_prepare_unwind(va);
goto unwind;
@@ -791,10 +865,11 @@ op_unmap_range(struct drm_gpuva_op_unmap *u,
u64 addr, u64 range)
{
struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ u8 page_shift = uvma->page_shift;
bool sparse = !!uvma->region;
if (!drm_gpuva_invalidated(u->va))
- nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+ nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
}
static void
@@ -804,15 +879,15 @@ op_remap(struct drm_gpuva_op_remap *r,
struct drm_gpuva_op_unmap *u = r->unmap;
struct nouveau_uvma *uvma = uvma_from_va(u->va);
u64 addr = uvma->va.va.addr;
- u64 range = uvma->va.va.range;
+ u64 end = uvma->va.va.addr + uvma->va.va.range;
if (r->prev)
addr = r->prev->va.addr + r->prev->va.range;
if (r->next)
- range = r->next->va.addr - addr;
+ end = r->next->va.addr;
- op_unmap_range(u, addr, range);
+ op_unmap_range(u, addr, end - addr);
}
static int
@@ -874,6 +949,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
struct drm_gpuva_op_map *n = r->next;
struct drm_gpuva *va = r->unmap->va;
struct nouveau_uvma *uvma = uvma_from_va(va);
+ u8 page_shift = uvma->page_shift;
if (unmap) {
u64 addr = va->va.addr;
@@ -885,7 +961,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
if (n)
end = n->va.addr;
- nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
+ nouveau_uvmm_vmm_put(uvmm, addr, end - addr, page_shift);
}
nouveau_uvma_gem_put(uvma);
@@ -929,25 +1005,13 @@ nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
static int
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
- u64 end = addr + range;
- u64 kernel_managed_end = uvmm->kernel_managed_addr +
- uvmm->kernel_managed_size;
-
if (addr & ~PAGE_MASK)
return -EINVAL;
if (range & ~PAGE_MASK)
return -EINVAL;
- if (end <= addr)
- return -EINVAL;
-
- if (addr < NOUVEAU_VA_SPACE_START ||
- end > NOUVEAU_VA_SPACE_END)
- return -EINVAL;
-
- if (addr < kernel_managed_end &&
- end > uvmm->kernel_managed_addr)
+ if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
return -EINVAL;
return 0;
@@ -970,6 +1034,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref)
{
struct nouveau_uvmm_bind_job *job =
container_of(kref, struct nouveau_uvmm_bind_job, kref);
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, &job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
nouveau_job_free(&job->base);
kfree(job);
@@ -1011,14 +1081,16 @@ bind_validate_op(struct nouveau_job *job,
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{
- struct nouveau_uvmm_bind_job *bind_job;
- struct nouveau_sched_entity *entity = job->entity;
+ struct nouveau_sched *sched = job->sched;
+ struct nouveau_job *__job;
struct bind_job_op *op;
u64 end = addr + range;
again:
- spin_lock(&entity->job.list.lock);
- list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ spin_lock(&sched->job.list.lock);
+ list_for_each_entry(__job, &sched->job.list.head, entry) {
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
+
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_UNMAP) {
u64 op_addr = op->va.addr;
@@ -1026,7 +1098,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1034,7 +1106,7 @@ again:
}
}
}
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
}
static int
@@ -1113,22 +1185,28 @@ bind_validate_region(struct nouveau_job *job)
}
static void
-bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+bind_link_gpuvas(struct bind_job_op *bop)
{
+ struct nouveau_uvma_prealloc *new = &bop->new;
+ struct drm_gpuvm_bo *vm_bo = bop->vm_bo;
+ struct drm_gpuva_ops *ops = bop->ops;
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
- drm_gpuva_link(&new->map->va);
+ drm_gpuva_link(&new->map->va, vm_bo);
break;
- case DRM_GPUVA_OP_REMAP:
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva *va = op->remap.unmap->va;
+
if (op->remap.prev)
- drm_gpuva_link(&new->prev->va);
+ drm_gpuva_link(&new->prev->va, va->vm_bo);
if (op->remap.next)
- drm_gpuva_link(&new->next->va);
- drm_gpuva_unlink(op->remap.unmap->va);
+ drm_gpuva_link(&new->next->va, va->vm_bo);
+ drm_gpuva_unlink(va);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
drm_gpuva_unlink(op->unmap.va);
break;
@@ -1139,21 +1217,70 @@ bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
}
static int
-nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (!op->ops)
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, obj, num_fences);
+ if (ret)
+ return ret;
+
+ /* Don't validate GEMs backing mappings we're about to
+ * unmap; it's not worth the effort.
+ */
+ if (va_op->op == DRM_GPUVA_OP_UNMAP)
+ continue;
+
+ ret = nouveau_bo_validate(nouveau_gem_object(obj),
+ true, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
- struct drm_exec *exec = &job->exec;
+ struct drm_exec *exec = &vme->exec;
struct bind_job_op *op;
int ret;
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_MAP) {
- op->gem.obj = drm_gem_object_lookup(job->file_priv,
- op->gem.handle);
- if (!op->gem.obj)
+ struct drm_gem_object *obj = op->gem.obj =
+ drm_gem_object_lookup(job->file_priv,
+ op->gem.handle);
+ if (!obj)
return -ENOENT;
+
+ dma_resv_lock(obj->resv, NULL);
+ op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(op->vm_bo))
+ return PTR_ERR(op->vm_bo);
+
+ drm_gpuvm_bo_extobj_add(op->vm_bo);
}
ret = bind_validate_op(job, op);
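
For context on the new OP_MAP handling above: drm_gpuvm_bo_obtain() returns a reference-counted VM/BO association and must be called with the GEM's reservation lock held; the job keeps that reference until cleanup, where it is dropped with drm_gpuvm_bo_put() under the same lock. A minimal kernel-context sketch of the pattern, using a hypothetical struct my_op to cache the association (not the driver's code):

#include <drm/drm_gpuvm.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

struct my_op {
	struct drm_gpuvm_bo *vm_bo;	/* hypothetical cache, mirrors bind_job_op */
};

static int my_op_attach_vm_bo(struct drm_gpuvm *gpuvm,
			      struct drm_gem_object *obj, struct my_op *op)
{
	struct drm_gpuvm_bo *vm_bo;

	dma_resv_lock(obj->resv, NULL);
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);	/* takes a reference */
	dma_resv_unlock(obj->resv);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	drm_gpuvm_bo_extobj_add(vm_bo);	/* track the BO as an external object */
	op->vm_bo = vm_bo;
	return 0;
}

static void my_op_detach_vm_bo(struct my_op *op)
{
	struct drm_gem_object *obj = op->vm_bo->obj;

	dma_resv_lock(obj->resv, NULL);
	drm_gpuvm_bo_put(op->vm_bo);	/* drops the reference taken above */
	dma_resv_unlock(obj->resv);
}
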
@@ -1176,6 +1303,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
* unwind all GPU VA space changes on failure.
*/
nouveau_uvmm_lock(uvmm);
+
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
@@ -1194,7 +1322,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
goto unwind_continue;
}
- op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
op->va.addr,
op->va.range);
if (IS_ERR(op->ops)) {
@@ -1205,7 +1333,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
op->ops);
if (ret) {
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
op->reg = NULL;
goto unwind_continue;
@@ -1216,6 +1344,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->gem.obj,
+ .map.gem.offset = op->gem.offset,
+ };
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1240,11 +1374,8 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
}
- op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
- op->va.addr,
- op->va.range,
- op->gem.obj,
- op->gem.offset);
+ op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
+ &map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
@@ -1256,7 +1387,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
op->va.range,
op->flags & 0xff);
if (ret) {
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
goto unwind_continue;
}
@@ -1264,7 +1395,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
break;
}
case OP_UNMAP:
- op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
op->va.addr,
op->va.range);
if (IS_ERR(op->ops)) {
@@ -1275,7 +1406,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
op->ops);
if (ret) {
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
goto unwind_continue;
}
@@ -1287,55 +1418,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
}
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(exec, vme->flags, 0);
drm_exec_until_all_locked(exec) {
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- ret = drm_exec_prepare_obj(exec, obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
- }
- }
- }
-
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- /* Don't validate GEMs backing mappings we're about to
- * unmap, it's not worth the effort.
- */
- if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
- continue;
-
- ret = nouveau_bo_validate(nouveau_gem_object(obj),
- true, false);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
+ ret = bind_lock_validate(job, exec, vme->num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
}
}
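
The locking loop above follows the generic drm_exec pattern: everything attempted inside drm_exec_until_all_locked() is retried from the top whenever drm_exec_retry_on_contention() sees that a lock had to be dropped. A minimal sketch of that pattern, independent of nouveau and assuming a caller-provided array of GEM objects:

#include <drm/drm_exec.h>

/* Lock all objects and reserve one fence slot on each; retried on contention. */
static int lock_all_objects(struct drm_gem_object **objs, unsigned int count)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < count; i++) {
			ret = drm_exec_prepare_obj(&exec, objs[i], 1);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}
out:
	/* The caller would add fences here before unlocking on success. */
	drm_exec_fini(&exec);
	return ret;
}
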
@@ -1364,7 +1453,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
case OP_UNMAP_SPARSE:
case OP_MAP:
case OP_UNMAP:
- bind_link_gpuvas(op->ops, &op->new);
+ bind_link_gpuvas(op);
break;
default:
break;
@@ -1372,10 +1461,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
nouveau_uvmm_unlock(uvmm);
- spin_lock(&entity->job.list.lock);
- list_add(&bind_job->entry, &entity->job.list.head);
- spin_unlock(&entity->job.list.lock);
-
return 0;
unwind_continue:
@@ -1404,27 +1489,23 @@ unwind:
break;
}
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
op->reg = NULL;
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_fini(exec);
+ drm_gpuvm_exec_unlock(vme);
return ret;
}
static void
-nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -1462,14 +1543,11 @@ out:
}
static void
-nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
{
- struct nouveau_uvmm_bind_job *bind_job =
- container_of(work, struct nouveau_uvmm_bind_job, work);
- struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
- struct nouveau_sched_entity *entity = job->entity;
- struct bind_job_op *op, *next;
+ struct bind_job_op *op;
list_for_each_op(op, &bind_job->ops) {
struct drm_gem_object *obj = op->gem.obj;
@@ -1509,44 +1587,29 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
}
if (!IS_ERR_OR_NULL(op->ops))
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
+
+ if (!IS_ERR_OR_NULL(op->vm_bo)) {
+ dma_resv_lock(obj->resv, NULL);
+ drm_gpuvm_bo_put(op->vm_bo);
+ dma_resv_unlock(obj->resv);
+ }
if (obj)
drm_gem_object_put(obj);
}
- spin_lock(&entity->job.list.lock);
- list_del(&bind_job->entry);
- spin_unlock(&entity->job.list.lock);
-
+ nouveau_job_done(job);
complete_all(&bind_job->complete);
- wake_up(&entity->job.wq);
-
- /* Remove and free ops after removing the bind job from the job list to
- * avoid races against bind_validate_map_sparse().
- */
- list_for_each_op_safe(op, next, &bind_job->ops) {
- list_del(&op->entry);
- kfree(op);
- }
nouveau_uvmm_bind_job_put(bind_job);
}
-static void
-nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
-{
- struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
-
- nouveau_sched_entity_qwork(entity, &bind_job->work);
-}
-
-static struct nouveau_job_ops nouveau_bind_job_ops = {
+static const struct nouveau_job_ops nouveau_bind_job_ops = {
.submit = nouveau_uvmm_bind_job_submit,
.armed_submit = nouveau_uvmm_bind_job_armed_submit,
.run = nouveau_uvmm_bind_job_run,
- .free = nouveau_uvmm_bind_job_free_qwork,
+ .free = nouveau_uvmm_bind_job_cleanup,
};
static int
@@ -1607,7 +1670,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
return ret;
INIT_LIST_HEAD(&job->ops);
- INIT_LIST_HEAD(&job->entry);
for (i = 0; i < __args->op.count; i++) {
ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
@@ -1618,11 +1680,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
}
init_completion(&job->complete);
- INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
- args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ args.credits = 1;
+
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -1648,18 +1711,6 @@ err_free:
return ret;
}
-int
-nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct drm_nouveau_vm_init *init = data;
-
- return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
- init->kernel_managed_size);
-}
-
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{
@@ -1760,7 +1811,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched_entity = &cli->sched_entity;
+ args.sched = cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
@@ -1776,15 +1827,18 @@ void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_map(uvma, mem);
- drm_gpuva_invalidate(va, false);
+ nouveau_uvma_map(uvma, mem);
+ drm_gpuva_invalidate(va, false);
+ }
}
}
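
As a sketch of the new iteration scheme used above (not the driver's code): with GPUVM a GEM object no longer carries a flat list of VAs; it carries a list of drm_gpuvm_bo associations, one per VM the object is mapped in, and each of those carries the VAs, hence the two nested loops.

#include <drm/drm_gpuvm.h>

/* Count all VAs mapping @obj across every VM; obj->resv must be held. */
static unsigned int count_gpuvas(struct drm_gem_object *obj)
{
	struct drm_gpuvm_bo *vm_bo;
	struct drm_gpuva *va;
	unsigned int count = 0;

	dma_resv_assert_held(obj->resv);

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {		/* one entry per VM */
		drm_gpuvm_bo_for_each_va(va, vm_bo)	/* VAs within that VM */
			count++;
	}

	return count;
}
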
@@ -1792,29 +1846,63 @@ void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_unmap(uvma);
- drm_gpuva_invalidate(va, true);
+ nouveau_uvma_unmap(uvma);
+ drm_gpuva_invalidate(va, true);
+ }
}
}
+static void
+nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
+{
+ struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
+
+ kfree(uvmm);
+}
+
+static int
+nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
+ nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
+ return nouveau_bo_validate(nvbo, true, false);
+}
+
+static const struct drm_gpuvm_ops gpuvm_ops = {
+ .vm_free = nouveau_uvmm_free,
+ .vm_bo_validate = nouveau_uvmm_bo_validate,
+};
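
The vm_bo_validate hook above is not called by nouveau directly; GPUVM invokes it for each evicted BO when the driver calls drm_gpuvm_validate() from its EXEC path. A rough sketch of such a call site, assuming the VM's objects are locked through drm_gpuvm_exec_lock() (field names per drm_gpuvm.h; not nouveau's actual exec code):

#include <drm/drm_gpuvm.h>

static int exec_lock_and_validate(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);	/* locks VM resv + external objs */
	if (ret)
		return ret;

	/* Calls back into gpuvm_ops.vm_bo_validate for every evicted BO. */
	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
	if (ret) {
		drm_gpuvm_exec_unlock(&vm_exec);
		return ret;
	}

	/* On success the caller submits, adds fences, then unlocks. */
	return 0;
}
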
+
int
-nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
- u64 kernel_managed_addr, u64 kernel_managed_size)
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
{
+ struct nouveau_uvmm *uvmm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_device *drm = cli->drm->dev;
+ struct drm_gem_object *r_obj;
+ struct drm_nouveau_vm_init *init = data;
+ u64 kernel_managed_end;
int ret;
- u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
- mutex_init(&uvmm->mutex);
- dma_resv_init(&uvmm->resv);
- mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
- mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+ if (check_add_overflow(init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &kernel_managed_end))
+ return -EINVAL;
+
+ if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
+ return -EINVAL;
mutex_lock(&cli->mutex);
@@ -1823,39 +1911,48 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
goto out_unlock;
}
- if (kernel_managed_end <= kernel_managed_addr) {
- ret = -EINVAL;
+ uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
+ if (!uvmm) {
+ ret = -ENOMEM;
goto out_unlock;
}
- if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
- ret = -EINVAL;
+ r_obj = drm_gpuvm_resv_object_alloc(drm);
+ if (!r_obj) {
+ kfree(uvmm);
+ ret = -ENOMEM;
goto out_unlock;
}
- uvmm->kernel_managed_addr = kernel_managed_addr;
- uvmm->kernel_managed_size = kernel_managed_size;
+ mutex_init(&uvmm->mutex);
+ mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
- drm_gpuva_manager_init(&uvmm->umgr, cli->name,
- NOUVEAU_VA_SPACE_START,
- NOUVEAU_VA_SPACE_END,
- kernel_managed_addr, kernel_managed_size,
- NULL);
+ drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
+ NOUVEAU_VA_SPACE_START,
+ NOUVEAU_VA_SPACE_END,
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &gpuvm_ops);
+ /* GPUVM holds its own reference to the resv object from here on, so drop ours. */
+ drm_gem_object_put(r_obj);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
- kernel_managed_addr, kernel_managed_size,
- NULL, 0, &cli->uvmm.vmm.vmm);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ NULL, 0, &uvmm->vmm.vmm);
if (ret)
- goto out_free_gpuva_mgr;
+ goto out_gpuvm_fini;
- cli->uvmm.vmm.cli = cli;
+ uvmm->vmm.cli = cli;
+ cli->uvmm.ptr = uvmm;
mutex_unlock(&cli->mutex);
return 0;
-out_free_gpuva_mgr:
- drm_gpuva_manager_destroy(&uvmm->umgr);
+out_gpuvm_fini:
+ drm_gpuvm_put(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
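
The check_add_overflow() call above replaces the old open-coded kernel_managed_end <= kernel_managed_addr test. A small standalone sketch of the same idea (using the compiler builtin that check_add_overflow() wraps; the limit below is illustrative, not nouveau's actual NOUVEAU_VA_SPACE_END):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VA_SPACE_END 0x8000000000ULL	/* illustrative upper bound */

static bool managed_range_valid(uint64_t addr, uint64_t size)
{
	uint64_t end;

	/* Reject wrap-around before comparing against the limit. */
	if (__builtin_add_overflow(addr, size, &end))
		return false;

	return end <= VA_SPACE_END;
}

int main(void)
{
	printf("%d\n", managed_range_valid(0x1000, 0x10000));		  /* 1 */
	printf("%d\n", managed_range_valid(UINT64_MAX - 0xfff, 0x2000)); /* 0: wraps */
	return 0;
}
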
@@ -1867,21 +1964,14 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
struct nouveau_cli *cli = uvmm->vmm.cli;
- struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
- if (!cli)
- return;
-
- rmb(); /* for list_empty to work without lock */
- wait_event(entity->job.wq, list_empty(&entity->job.list.head));
-
nouveau_uvmm_lock(uvmm);
- drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
+ drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
struct nouveau_uvma *uvma = uvma_from_va(va);
struct drm_gem_object *obj = va->gem.obj;
- if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ if (unlikely(va == &uvmm->base.kernel_alloc_node))
continue;
drm_gpuva_remove(va);
@@ -1910,8 +2000,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuva_manager_destroy(&uvmm->umgr);
+ drm_gpuvm_put(&uvmm->base);
mutex_unlock(&cli->mutex);
-
- dma_resv_fini(&uvmm->resv);
}