Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  187
1 file changed, 86 insertions(+), 101 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 60d8bedb694d..c6a214f1e991 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -473,11 +473,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
- if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
- size_t size = sizeof(struct page *);
-
- size *= bo->tbo.ttm->num_pages;
- memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
+ amdgpu_ttm_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+ false);
+ if (r)
+ return r;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+ lobj->user_pages);
binding_userptr = true;
}
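
The rewritten block no longer open-codes a memcpy of the page array guarded by the tt_bound state. It first validates the BO into the CPU domain, so any stale GTT binding is dropped before the fresh user pages are installed through amdgpu_ttm_tt_set_user_pages(). A minimal sketch of what such a set_user_pages helper amounts to, assuming only the standard ttm_tt fields; the driver's actual implementation is not part of this diff:

    /* Hypothetical sketch: copy a caller-supplied page array into the
     * ttm_tt, replacing the memcpy the old code open-coded above.
     */
    static void example_set_user_pages(struct ttm_tt *ttm, struct page **pages)
    {
            unsigned long i;

            for (i = 0; i < ttm->num_pages; ++i)
                    ttm->pages[i] = pages[i];
    }
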
@@ -502,7 +507,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;
@@ -510,9 +514,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
- need_mmap_lock = p->bo_list->first_userptr !=
- p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev);
}
INIT_LIST_HEAD(&duplicates);
@@ -521,9 +525,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
- if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
-
while (1) {
struct list_head need_pages;
unsigned i;
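
Taken with the preceding hunk, this replaces the old mmap_sem scheme: rather than holding current->mm->mmap_sem across validation, the parser records the device's MMU notifier handle whenever the BO list contains userptr entries, and the invalidation race is closed later, in amdgpu_cs_submit(), under amdgpu_mn_lock(). Condensed setup side from the hunk above; that amdgpu_mn_lock() treats a NULL handle as a no-op (so non-userptr submissions pay nothing) is my assumption, not shown in this diff:

    /* Only submissions that carry userptr BOs look up the notifier;
     * p->mn stays NULL otherwise (assumed no-op in amdgpu_mn_lock()).
     */
    if (p->bo_list->first_userptr != p->bo_list->num_entries)
            p->mn = amdgpu_mn_get(p->adev);
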
@@ -543,23 +544,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
+ bo = e->robj;
- if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
+ bo->tbo.ttm->num_pages,
false);
kvfree(e->user_pages);
e->user_pages = NULL;
}
- if (e->robj->tbo.ttm->state != tt_bound &&
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
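
Each invalidated page array is now released before the entry is queued for re-acquisition on need_pages. The same calls as in the hunk above, annotated:

    /* release_pages() drops the references taken when the array was
     * populated, and kvfree() matches the kvmalloc-style allocation of
     * the array itself (the allocation site is not in this hunk).  The
     * entry then joins need_pages, so fresh pages are fetched on the
     * next pass of the surrounding while (1) loop.
     */
    release_pages(e->user_pages, bo->tbo.ttm->num_pages, false);
    kvfree(e->user_pages);
    e->user_pages = NULL;
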
@@ -636,9 +639,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- fpriv->vm.last_eviction_counter =
- atomic64_read(&p->adev->num_evictions);
-
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -679,9 +679,6 @@ error_validate:
error_free_pages:
- if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
-
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
@@ -728,11 +725,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (!error)
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- parser->fence);
- else if (backoff)
+ if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
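
amdgpu_cs_parser_fini() thus stops fencing the validated list on success and only backs off reservations on error; the success-path call to ttm_eu_fence_buffer_objects() moves into amdgpu_cs_submit() (last hunk of that function below), where it can run while the MMU notifier lock is still held. A simplified sketch of the resulting split; the ioctl wrapper itself is not part of this diff:

    /* Sketch only: on success, cs_submit has already fenced
     * p->validated under the notifier lock; on error, parser_fini
     * backs the reservations off.
     */
    r = amdgpu_cs_submit(p, cs);
    amdgpu_cs_parser_fini(p, r, true);
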
@@ -768,10 +761,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
- if (r)
- return r;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -825,7 +814,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
+ r = amdgpu_vm_handle_moved(adev, vm);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+ if (r)
+ return r;
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -835,7 +830,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo);
+ amdgpu_vm_bo_invalidate(adev, bo, false);
}
}
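
Together with the removal of the vm->last_dir_update sync earlier in this function, per-submission VM bookkeeping now funnels through amdgpu_vm_handle_moved(), and a single sync against vm->last_update covers both the directory updates and the moved-BO updates. The resulting order, condensed from the hunks above:

    r = amdgpu_vm_clear_freed(adev, vm, NULL);  /* reclaim freed mappings */
    if (r)
            return r;
    /* ... per-BO page-table updates (unchanged, omitted) ... */
    r = amdgpu_vm_handle_moved(adev, vm);       /* re-map moved/evicted BOs */
    if (r)
            return r;
    r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
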
@@ -860,7 +855,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
if (p->job->vm) {
- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p);
if (r)
@@ -928,11 +923,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
uint64_t offset;
uint8_t *kptr;
- m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj);
- if (!aobj) {
+ r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+ &aobj, &m);
+ if (r) {
DRM_ERROR("IB va_start is invalid\n");
- return -EINVAL;
+ return r;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
@@ -1133,14 +1128,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
+ unsigned i;
+ uint64_t seq;
+
int r;
+ amdgpu_mn_lock(p->mn);
+ if (p->bo_list) {
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
+ }
+ }
+ }
+
job = p->job;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
return r;
}
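
This is the other half of the mmap_sem replacement: with amdgpu_mn_lock() held, the notifier cannot invalidate pages concurrently, so the re-check is race-free, and -ERESTARTSYS makes the kernel restart the ioctl transparently so that fresh page arrays are acquired on the retry. The core of the check, annotated:

    /* If any userptr BO lost its pages between validation and here,
     * bail out; the restarted ioctl re-runs amdgpu_cs_parser_bos()
     * and fetches the pages again.
     */
    if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
            amdgpu_mn_unlock(p->mn);
            return -ERESTARTSYS;
    }
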
@@ -1148,14 +1160,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+ r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+ if (r) {
+ dma_fence_put(p->fence);
+ dma_fence_put(&job->base.s_fence->finished);
+ amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
+ return r;
+ }
+
amdgpu_cs_post_dependencies(p);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
- job->uf_sequence = cs->out.handle;
+ cs->out.handle = seq;
+ job->uf_sequence = seq;
+
amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+
+ ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+ amdgpu_mn_unlock(p->mn);
+
return 0;
}
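
amdgpu_ctx_add_fence() can now fail, so the sequence number comes back through an out parameter rather than the return value, and the new error path unwinds in reverse order of acquisition. The unwind from the hunk above, annotated:

    /* Reverse-order unwind: drop the ioctl's fence reference, drop the
     * scheduler-fence reference taken just before, free the job that
     * was never pushed, then release the notifier lock taken at the
     * top of cs_submit.
     */
    dma_fence_put(p->fence);
    dma_fence_put(&job->base.s_fence->finished);
    amdgpu_job_free(job);
    amdgpu_mn_unlock(p->mn);
    return r;
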
@@ -1383,6 +1409,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
+ first = i;
goto out;
}
}
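
A one-line fix: for an already-signaled fence the function returned r = 1 but left first at whatever value it last held, so userspace read back a stale index. A self-contained illustration of the bug class (hypothetical helper, not driver code):

    /* Hypothetical: an "already done" early exit must still record
     * which index it hit before jumping out.
     */
    static int find_first_done(const bool *done, unsigned int n,
                               unsigned int *first)
    {
            unsigned int i;

            for (i = 0; i < n; ++i) {
                    if (done[i]) {
                            *first = i;  /* the line this hunk adds */
                            return 1;
                    }
            }
            return 0;
    }
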
@@ -1462,78 +1489,36 @@ err_free_fences:
- * virtual memory address. Returns allocation structure when found, NULL
- * otherwise.
+ * virtual memory address. Returns 0 on success and fills in the bo and
+ * map pointers, or a negative error code otherwise.
*/
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **map)
{
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- unsigned i;
-
- if (!parser->bo_list)
- return NULL;
+ int r;
addr /= AMDGPU_GPU_PAGE_SIZE;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo_list_entry *lobj;
-
- lobj = &parser->bo_list->array[i];
- if (!lobj->bo_va)
- continue;
-
- list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
-
- list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
- }
+ *bo = mapping->bo_va->base.bo;
+ *map = mapping;
- return NULL;
-}
+ /* Double check that the BO is reserved by this CS */
+ if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+ return -EINVAL;
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
- int r;
+ r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+ if (unlikely(r))
+ return r;
- if (!parser->bo_list)
+ if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
return 0;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
-
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r))
- return r;
-
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- continue;
-
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r))
- return r;
- }
-
- return 0;
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
}