Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_vm.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 76
1 file changed, 39 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 5f68245579a3..21a5340aefdf 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"
@@ -41,7 +41,7 @@
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
* associated with each VMID. When execting a command buffer,
- * the kernel tells the the ring what VMID to use for that command
+ * the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
* sets up their pages tables accordingly when they submit their
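The comment describes a two-level translation: a per-VMID page directory whose entries point at page tables. A minimal sketch of the address split it implies (illustrative only, not verbatim driver code; radeon_vm_block_size is the driver parameter giving the number of page-frame bits one page table covers):

uint64_t pfn = gpu_addr >> PAGE_SHIFT;			/* GPU page frame number */
uint64_t pde_idx = pfn >> radeon_vm_block_size;		/* selects a page table */
uint64_t pte_idx = pfn & ((1ULL << radeon_vm_block_size) - 1);	/* entry within it */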
@@ -51,7 +51,7 @@
*/
/**
- * radeon_vm_num_pde - return the number of page directory entries
+ * radeon_vm_num_pdes - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
@@ -119,6 +119,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
/**
* radeon_vm_get_bos - add the vm BOs to a validation list
*
+ * @rdev: radeon_device pointer
* @vm: vm providing the BOs
* @head: head of validation list
*
@@ -139,24 +140,22 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
- list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = true;
+ list[0].shared = true;
list[0].tiling_flags = 0;
- list_add(&list[0].tv.head, head);
+ list_add(&list[0].list, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
continue;
list[idx].robj = vm->page_tables[i].bo;
- list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = true;
+ list[idx].shared = true;
list[idx].tiling_flags = 0;
- list_add(&list[idx++].tv.head, head);
+ list_add(&list[idx++].list, head);
}
return list;
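This hunk tracks the removal of the embedded struct ttm_validate_buffer tv: the list node and shared flag become first-class members of struct radeon_bo_list. A sketch of the implied layout change (only the fields visible in the diff are evidenced; ordering and types are assumptions):

/* Before (assumed): TTM execbuf-util helper embedded in each entry. */
struct radeon_bo_list {
	struct radeon_bo		*robj;
	struct ttm_validate_buffer	tv;	/* held .bo, .shared, .head */
	u32				preferred_domains;
	u32				allowed_domains;
	u32				tiling_flags;
};

/* After (assumed): plain list membership, no TTM helper. */
struct radeon_bo_list {
	struct radeon_bo	*robj;
	struct list_head	list;
	bool			shared;
	u32			preferred_domains;
	u32			allowed_domains;
	u32			tiling_flags;
};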
@@ -188,7 +187,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
return NULL;
- /* we definately need to flush */
+ /* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
/* skip over VMID 0, since it is the system VM */
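Writing ~0 into pd_gpu_addr is the driver's force-flush sentinel: no real page-directory address can equal it, so the next comparison against the current PD address must mismatch and trigger a flush. A hedged sketch of the consuming side (simplified; the flush emission and fence bookkeeping are elided):

uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

if (pd_addr != vm_id->pd_gpu_addr) {	/* always true right after the ~0 reset */
	vm_id->pd_gpu_addr = pd_addr;
	/* ... emit the VM flush packet on the ring ... */
}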
@@ -296,9 +295,9 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ if (bo_va->vm == vm)
return bo_va;
- }
+
}
return NULL;
}
@@ -323,9 +322,9 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (bo_va == NULL) {
+ if (bo_va == NULL)
return NULL;
- }
+
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->it.start = 0;
@@ -387,6 +386,7 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
@@ -396,7 +396,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error_unreserve;
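ttm_bo_validate() no longer takes interruptible/no_wait_gpu booleans; they moved into struct ttm_operation_ctx, so the positional initializer { true, false } above maps onto those two leading fields. An equivalent, more self-documenting spelling (same semantics; only the two fields shown are assumed set):

struct ttm_operation_ctx ctx = {
	.interruptible = true,	/* waits may be interrupted by signals */
	.no_wait_gpu   = false,	/* do block on GPU activity if needed */
};

r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);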
@@ -624,12 +624,10 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
}
/**
- * radeon_vm_update_pdes - make sure that page directory is valid
+ * radeon_vm_update_page_directory - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
*
* Allocates new page tables if necessary
* and updates the page directory (cayman+).
@@ -701,7 +699,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+ radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
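pd->tbo.resv becomes pd->tbo.base.resv because TTM buffer objects grew an embedded struct drm_gem_object (base), which now owns the reservation object; the object type itself was renamed from reservation_object to dma_resv along the way. A simplified sketch of the containment, reduced to the relevant fields:

struct ttm_buffer_object {
	struct drm_gem_object base;	/* embedded GEM object */
	/* ... */
};

struct drm_gem_object {
	struct dma_resv *resv;		/* points at _resv unless imported */
	struct dma_resv _resv;
	/* ... */
};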
@@ -801,6 +799,7 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*
* @rdev: radeon_device pointer
* @vm: requested vm
+ * @ib: indirect buffer to use for the update
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
@@ -829,8 +828,8 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
uint64_t pte;
int r;
- radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.resv);
+ radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
+ r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);
if (r)
return r;
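reservation_object_reserve_shared() became dma_resv_reserve_fences(), which takes an explicit count of fence slots to preallocate. The usual pattern, sketched under the assumption that a write fence is being attached (the usage value is illustrative, not necessarily what this driver passes):

/* Preallocate one fence slot; can fail with -ENOMEM, hence the check. */
r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r)
	return r;

/* ... queue the GPU work that produces `fence` ... */

/* The slot was reserved above, so adding the fence cannot fail. */
dma_resv_add_fence(bo->tbo.base.resv, fence, DMA_RESV_USAGE_WRITE);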
@@ -899,8 +898,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
* radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
+ * @bo_va: radeon buffer virtual address object
* @mem: ttm mem
*
* Fill in the page table entries for @bo (cayman+).
@@ -910,7 +908,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
@@ -941,14 +939,14 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
- if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+ if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm))
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
- if (mem->mem_type != TTM_PL_SYSTEM) {
+ addr = (u64)mem->start << PAGE_SHIFT;
+ if (mem->mem_type != TTM_PL_SYSTEM)
bo_va->flags |= RADEON_VM_PAGE_VALID;
- }
+
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
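Besides the ttm_mem_reg → ttm_resource rename, note the (u64) cast: mem->start is a page number held in an unsigned long, and on a 32-bit kernel shifting it by PAGE_SHIFT in the narrow type wraps for offsets at or above 4 GiB. A worked illustration (assuming 4 KiB pages and a 32-bit unsigned long):

unsigned long start = 0x100000;		/* page 0x100000 == byte offset 4 GiB */
u64 bad  = start << PAGE_SHIFT;		/* shift happens in 32 bits: wraps to 0 */
u64 good = (u64)start << PAGE_SHIFT;	/* widened first: 0x100000000 as intended */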
@@ -1144,7 +1142,6 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
* radeon_vm_bo_invalidate - mark the bo as invalid
*
* @rdev: radeon_device pointer
- * @vm: requested vm
* @bo: radeon buffer object
*
* Mark @bo as invalid (cayman+).
@@ -1185,7 +1182,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
vm->ids[i].last_id_use = NULL;
}
mutex_init(&vm->mutex);
- vm->va = RB_ROOT;
+ vm->va = RB_ROOT_CACHED;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
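vm->va switching from RB_ROOT to RB_ROOT_CACHED follows the interval-tree API, which moved to rb_root_cached so the leftmost (first) node is cached and reachable in O(1). A minimal sketch of the conventions this imposes on users (generic rbtree code, not driver-specific):

struct rb_root_cached root = RB_ROOT_CACHED;

/* The leftmost node is cached, so no tree walk is needed here. */
struct rb_node *first = rb_first_cached(&root);

/* The plain root is embedded, hence checks spell &root.rb_root. */
bool empty = RB_EMPTY_ROOT(&root.rb_root);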
@@ -1205,13 +1202,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
- if (r)
+ if (r) {
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
-
+ }
r = radeon_vm_clear_bo(rdev, vm->page_directory);
if (r) {
radeon_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
}
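Both error paths now release vm->page_tables, which is allocated earlier in radeon_vm_init() and previously leaked on failure. The duplicated cleanup could equally be written with the kernel's usual goto-unwind idiom; a sketch with hypothetical label names:

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &vm->page_directory);
	if (r)
		goto err_free_tables;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r)
		goto err_free_pd;

	return 0;

err_free_pd:
	radeon_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;
err_free_tables:
	kfree(vm->page_tables);
	vm->page_tables = NULL;
	return r;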
@@ -1232,10 +1233,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!RB_EMPTY_ROOT(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root))
dev_err(rdev->dev, "still active bo inside vm\n");
- }
- rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+
+ rbtree_postorder_for_each_entry_safe(bo_va, tmp,
+ &vm->va.rb_root, it.rb) {
interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
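A closing note on the teardown loop: rbtree_postorder_for_each_entry_safe() visits children before their parents, which lets each entry be freed as it is reached without re-walking the tree. A generic sketch of the idiom (struct foo and its node member are hypothetical):

struct foo { struct rb_node node; };
struct rb_root_cached root = RB_ROOT_CACHED;
struct foo *entry, *tmp;

/* Safe to kfree(entry) in the body: tmp already points at the next
 * postorder node, and plain traversal does no rebalancing.
 */
rbtree_postorder_for_each_entry_safe(entry, tmp, &root.rb_root, node)
	kfree(entry);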