Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_vm.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index f60fae0aed11..21a5340aefdf 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -41,7 +41,7 @@
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
* associated with each VMID. When execting a command buffer,
- * the kernel tells the the ring what VMID to use for that command
+ * the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
* sets up their pages tables accordingly when they submit their
@@ -51,7 +51,7 @@
*/
/**
- * radeon_vm_num_pde - return the number of page directory entries
+ * radeon_vm_num_pdes - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
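
The rename above makes the kerneldoc match the function's actual name, radeon_vm_num_pdes. Its body is not part of this hunk; purely as a self-contained sketch of the arithmetic (page and block sizes here are assumptions for the example, not the driver's values), the PDE count is the page-frame count shifted down by the per-table block size:

#include <stdio.h>

/* Illustrative only: models how a PDE count falls out of an address-space
 * size, a 4 KiB GPU page, and a per-page-table "block size" in pages. */
#define GPU_PAGE_SHIFT 12   /* 4 KiB pages */
#define VM_BLOCK_SIZE  9    /* 512 PTEs per page table */

static unsigned num_pdes(unsigned long long vm_size_bytes)
{
	unsigned long long max_pfn = vm_size_bytes >> GPU_PAGE_SHIFT;

	/* one PDE per page table; each table maps 2^VM_BLOCK_SIZE pages */
	return (unsigned)(max_pfn >> VM_BLOCK_SIZE);
}

int main(void)
{
	printf("PDEs for a 1 GiB address space: %u\n", num_pdes(1ULL << 30));
	return 0;
}

With these example values, a 1 GiB address space is 2^30 >> 12 = 2^18 pages, and 2^18 >> 9 = 512 directory entries.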
@@ -119,6 +119,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
/**
* radeon_vm_get_bos - add the vm BOs to a validation list
*
+ * @rdev: radeon_device pointer
* @vm: vm providing the BOs
* @head: head of validation list
*
@@ -141,10 +142,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].robj = vm->page_directory;
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.num_shared = 1;
+ list[0].shared = true;
list[0].tiling_flags = 0;
- list_add(&list[0].tv.head, head);
+ list_add(&list[0].list, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
@@ -153,10 +153,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].robj = vm->page_tables[i].bo;
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.num_shared = 1;
+ list[idx].shared = true;
list[idx].tiling_flags = 0;
- list_add(&list[idx++].tv.head, head);
+ list_add(&list[idx++].list, head);
}
return list;
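
The two hunks above drop the ttm_validate_buffer embedded in each entry (the old tv.bo and tv.num_shared fields) in favor of fields radeon_bo_list owns directly: a plain shared flag and the entry's own list head. A minimal stand-alone model of the resulting layout (stand-in types, not the real kernel structs):

#include <stdbool.h>
#include <stdio.h>

/* Simplified doubly linked list, mimicking the kernel's list_head API. */
struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

/* Stand-in for the reworked radeon_bo_list entry. */
struct bo_list_entry {
	void *robj;            /* the buffer object itself */
	bool shared;           /* reserve a shared fence slot */
	struct list_head list; /* links the entry into the validation list */
};

int main(void)
{
	struct list_head head = { &head, &head };
	struct bo_list_entry pd = { .robj = NULL, .shared = true };

	list_add(&pd.list, &head); /* page directory goes first, as in the diff */
	printf("page directory linked first: %d\n", head.next == &pd.list);
	return 0;
}

The loop in radeon_vm_get_bos() then links one such entry per allocated page table after the page directory, which is what the list_add(&list[idx++].list, head) line does.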
@@ -188,7 +187,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
return NULL;
- /* we definately need to flush */
+ /* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
/* skip over VMID 0, since it is the system VM */
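
Writing ~0ll into pd_gpu_addr acts as a sentinel: no real page-directory address can ever equal it, so the next comparison against the current PD address is guaranteed to request a flush. A small illustrative model of that idea (names invented for the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PD_ADDR_INVALID (~0ULL) /* never a valid GPU address */

struct vm_id_state {
	uint64_t pd_gpu_addr;
};

static bool needs_flush(const struct vm_id_state *id, uint64_t current_pd)
{
	return id->pd_gpu_addr != current_pd;
}

int main(void)
{
	struct vm_id_state id = { .pd_gpu_addr = PD_ADDR_INVALID };

	/* freshly grabbed ID: the sentinel never matches a real PD address */
	printf("flush needed: %d\n", needs_flush(&id, 0x100000));
	id.pd_gpu_addr = 0x100000;
	printf("flush needed: %d\n", needs_flush(&id, 0x100000));
	return 0;
}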
@@ -625,12 +624,10 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
}
/**
- * radeon_vm_update_pdes - make sure that page directory is valid
+ * radeon_vm_update_page_directory - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
*
* Allocates new page tables if necessary
* and updates the page directory (cayman+).
@@ -802,6 +799,7 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*
* @rdev: radeon_device pointer
* @vm: requested vm
+ * @ib: indirect buffer to use for the update
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
@@ -831,7 +829,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
int r;
radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
- r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
+ r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);
if (r)
return r;
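
dma_resv_reserve_shared() was absorbed into dma_resv_reserve_fences(), which takes the number of fence slots to preallocate; the call can fail with -ENOMEM, hence the error check. Reserving up front matters because the fence is added later, in a context where failure would be hard to unwind. An illustrative user-space model of that reserve-then-add pattern (not the dma_resv implementation):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Slots are grown while failure is still recoverable, so adding a fence
 * afterwards can never fail. */
struct fence_list {
	void   **slots;
	unsigned count, capacity;
};

static int reserve_fences(struct fence_list *fl, unsigned num)
{
	unsigned want = fl->count + num;
	void **grown;

	if (want <= fl->capacity)
		return 0;
	grown = realloc(fl->slots, want * sizeof(*grown));
	if (!grown)
		return -ENOMEM; /* caller bails out, as in the hunk above */
	fl->slots = grown;
	fl->capacity = want;
	return 0;
}

static void add_fence(struct fence_list *fl, void *fence)
{
	/* cannot fail: capacity was reserved beforehand */
	fl->slots[fl->count++] = fence;
}

int main(void)
{
	struct fence_list fl = { 0 };
	int dummy;

	if (reserve_fences(&fl, 1))
		return 1;
	add_fence(&fl, &dummy);
	printf("fences: %u of %u slots used\n", fl.count, fl.capacity);
	free(fl.slots);
	return 0;
}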
@@ -900,8 +898,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
* radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
+ * @bo_va: radeon buffer virtual address object
* @mem: ttm mem
*
* Fill in the page table entries for @bo (cayman+).
@@ -911,7 +908,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
@@ -942,7 +939,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
- if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+ if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm))
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
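
radeon_ttm_tt_is_readonly() now takes the device pointer as well, but the surrounding logic is unchanged: clear the bits that get rederived from the new placement, then strip WRITEABLE when the backing pages are read-only. Modeled in isolation (bit values invented for the example, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VM_PAGE_VALID     (1u << 0)
#define VM_PAGE_SYSTEM    (1u << 1)
#define VM_PAGE_SNOOPED   (1u << 2)
#define VM_PAGE_WRITEABLE (1u << 3)

static uint32_t recompute_flags(uint32_t flags, bool readonly)
{
	/* clear the placement-derived bits before they are recomputed */
	flags &= ~(VM_PAGE_VALID | VM_PAGE_SYSTEM | VM_PAGE_SNOOPED);
	if (readonly)
		flags &= ~VM_PAGE_WRITEABLE;
	return flags;
}

int main(void)
{
	uint32_t f = VM_PAGE_VALID | VM_PAGE_WRITEABLE;

	printf("0x%x\n", recompute_flags(f, true));  /* -> 0x0 */
	printf("0x%x\n", recompute_flags(f, false)); /* -> WRITEABLE only */
	return 0;
}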
@@ -1145,7 +1142,6 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
* radeon_vm_bo_invalidate - mark the bo as invalid
*
* @rdev: radeon_device pointer
- * @vm: requested vm
* @bo: radeon buffer object
*
* Mark @bo as invalid (cayman+).
@@ -1206,13 +1202,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
- if (r)
+ if (r) {
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
-
+ }
r = radeon_vm_clear_bo(rdev, vm->page_directory);
if (r) {
radeon_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
}
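
Both failure paths in radeon_vm_init() now free the page_tables array allocated earlier in the function, closing a leak. The open-coded frees are correct; the more common kernel shape for this kind of unwind is a goto ladder, sketched here stand-alone (create_bo/clear_bo are stand-ins for radeon_bo_create/radeon_vm_clear_bo):

#include <stdio.h>
#include <stdlib.h>

struct vm {
	void *page_tables;
	void *page_directory;
};

static int create_bo(struct vm *vm)
{
	vm->page_directory = malloc(1);
	return vm->page_directory ? 0 : -1;
}

static int clear_bo(struct vm *vm)
{
	(void)vm;
	return 0; /* pretend clearing succeeded */
}

static int vm_init(struct vm *vm)
{
	int r;

	vm->page_tables = calloc(8, sizeof(void *));
	if (!vm->page_tables)
		return -1;

	r = create_bo(vm);
	if (r)
		goto err_free_tables;

	r = clear_bo(vm);
	if (r)
		goto err_free_bo;

	return 0;

err_free_bo: /* mirrors radeon_bo_unref(&vm->page_directory) */
	free(vm->page_directory);
	vm->page_directory = NULL;
err_free_tables: /* the cleanup this diff adds to both error paths */
	free(vm->page_tables);
	vm->page_tables = NULL;
	return r;
}

int main(void)
{
	struct vm vm = { 0 };

	printf("init: %d\n", vm_init(&vm));
	free(vm.page_directory);
	free(vm.page_tables);
	return 0;
}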