Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gem.c')
 -rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 551
 1 file changed, 417 insertions(+), 134 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index aa796031ab65..18ca1bcfd2f9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -25,31 +25,74 @@ * Alex Deucher * Jerome Glisse */ -#include <drm/drmP.h> + +#include <linux/debugfs.h> +#include <linux/iosys-map.h> +#include <linux/pci.h> + +#include <drm/drm_device.h> +#include <drm/drm_file.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/radeon_drm.h> + #include "radeon.h" +#include "radeon_prime.h" -int radeon_gem_object_init(struct drm_gem_object *obj) +struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, + int flags); +struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); +int radeon_gem_prime_pin(struct drm_gem_object *obj); +void radeon_gem_prime_unpin(struct drm_gem_object *obj); + +static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) { - BUG(); + struct ttm_buffer_object *bo = vmf->vma->vm_private_data; + struct radeon_device *rdev = radeon_get_rdev(bo->bdev); + vm_fault_t ret; - return 0; + down_read(&rdev->pm.mclk_lock); + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + goto unlock_mclk; + + ret = radeon_bo_fault_reserve_notify(bo); + if (ret) + goto unlock_resv; + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + goto unlock_mclk; + +unlock_resv: + dma_resv_unlock(bo->base.resv); + +unlock_mclk: + up_read(&rdev->pm.mclk_lock); + return ret; } -void radeon_gem_object_free(struct drm_gem_object *gobj) +static const struct vm_operations_struct radeon_gem_vm_ops = { + .fault = radeon_gem_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access +}; + +static void radeon_gem_object_free(struct drm_gem_object *gobj) { struct radeon_bo *robj = gem_to_radeon_bo(gobj); if (robj) { - if (robj->gem_base.import_attach) - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); - radeon_bo_unref(&robj); + radeon_mn_unregister(robj); + ttm_bo_fini(&robj->tbo); } } -int radeon_gem_object_create(struct radeon_device *rdev, int size, +int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, int alignment, int initial_domain, - bool discardable, bool kernel, + u32 flags, bool kernel, struct drm_gem_object **obj) { struct radeon_bo *robj; @@ -62,28 +105,31 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, alignment = PAGE_SIZE; } - /* maximun bo size is the minimun btw visible vram and gtt size */ - max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); + /* Maximum bo size is the unpinned gtt size since we use the gtt to + * handle vram to system pool migrations. 
+ */ + max_size = rdev->mc.gtt_size - rdev->gart_pin_size; if (size > max_size) { - printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n", - __func__, __LINE__, size >> 20, max_size >> 20); + DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", + size >> 20, max_size >> 20); return -ENOMEM; } retry: - r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); + r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, + flags, NULL, NULL, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { initial_domain |= RADEON_GEM_DOMAIN_GTT; goto retry; } - DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", + DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", size, initial_domain, alignment, r); } return r; } - *obj = &robj->gem_base; + *obj = &robj->tbo.base; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); @@ -93,12 +139,12 @@ retry: return 0; } -int radeon_gem_set_domain(struct drm_gem_object *gobj, +static int radeon_gem_set_domain(struct drm_gem_object *gobj, uint32_t rdomain, uint32_t wdomain) { struct radeon_bo *robj; uint32_t domain; - int r; + long r; /* FIXME: reeimplement */ robj = gem_to_radeon_bo(gobj); @@ -109,17 +155,26 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, } if (!domain) { /* Do nothings */ - printk(KERN_WARNING "Set domain without domain !\n"); + pr_warn("Set domain without domain !\n"); return 0; } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = radeon_bo_wait(robj, NULL, false); - if (r) { - printk(KERN_ERR "Failed to wait for object !\n"); + r = dma_resv_wait_timeout(robj->tbo.base.resv, + DMA_RESV_USAGE_BOOKKEEP, + true, 30 * HZ); + if (!r) + r = -EBUSY; + + if (r < 0 && r != -EINTR) { + pr_err("Failed to wait for object: %li\n", r); return r; } } + if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) { + /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */ + return -EINVAL; + } return 0; } @@ -138,7 +193,7 @@ void radeon_gem_fini(struct radeon_device *rdev) * Call from drm_gem_handle_create which appear in both new and open ioctl * case. 
*/ -int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) +static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { struct radeon_bo *rbo = gem_to_radeon_bo(obj); struct radeon_device *rdev = rbo->rdev; @@ -147,7 +202,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return 0; } @@ -167,8 +223,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri return 0; } -void radeon_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv) +static void radeon_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) { struct radeon_bo *rbo = gem_to_radeon_bo(obj); struct radeon_device *rdev = rbo->rdev; @@ -177,7 +233,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return; } @@ -206,6 +263,31 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) return r; } +static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct radeon_bo *bo = gem_to_radeon_bo(obj); + struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev); + + if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm)) + return -EPERM; + + return drm_gem_ttm_mmap(obj, vma); +} + +const struct drm_gem_object_funcs radeon_gem_object_funcs = { + .free = radeon_gem_object_free, + .open = radeon_gem_object_open, + .close = radeon_gem_object_close, + .export = radeon_gem_prime_export, + .pin = radeon_gem_prime_pin, + .unpin = radeon_gem_prime_unpin, + .get_sg_table = radeon_gem_prime_get_sg_table, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, + .mmap = radeon_gem_object_mmap, + .vm_ops = &radeon_gem_vm_ops, +}; + /* * GEM ioctls. 
*/ @@ -214,36 +296,17 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; - struct ttm_mem_type_manager *man; - unsigned i; - - man = &rdev->mman.bdev.man[TTM_PL_VRAM]; - - args->vram_size = rdev->mc.real_vram_size; - args->vram_visible = (u64)man->size << PAGE_SHIFT; - if (rdev->stollen_vga_memory) - args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); - args->vram_visible -= radeon_fbdev_total_size(rdev); - args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; - for(i = 0; i < RADEON_NUM_RINGS; ++i) - args->gart_size -= rdev->ring[i].ring_size; - return 0; -} + struct ttm_resource_manager *man; -int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) -{ - /* TODO: implement */ - DRM_ERROR("unimplemented %s\n", __func__); - return -ENOSYS; -} + man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); -int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) -{ - /* TODO: implement */ - DRM_ERROR("unimplemented %s\n", __func__); - return -ENOSYS; + args->vram_size = (u64)man->size << PAGE_SHIFT; + args->vram_visible = rdev->mc.visible_vram_size; + args->vram_visible -= rdev->vram_pin_size; + args->gart_size = rdev->mc.gtt_size; + args->gart_size -= rdev->gart_pin_size; + + return 0; } int radeon_gem_create_ioctl(struct drm_device *dev, void *data, @@ -259,8 +322,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ args->size = roundup(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, args->alignment, - args->initial_domain, false, - false, &gobj); + args->initial_domain, args->flags, + false, &gobj); if (r) { up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); @@ -268,7 +331,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, } r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); if (r) { up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); @@ -279,6 +342,97 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct ttm_operation_ctx ctx = { true, false }; + struct radeon_device *rdev = dev->dev_private; + struct drm_radeon_gem_userptr *args = data; + struct drm_gem_object *gobj; + struct radeon_bo *bo; + uint32_t handle; + int r; + + args->addr = untagged_addr(args->addr); + + if (offset_in_page(args->addr | args->size)) + return -EINVAL; + + /* reject unknown flag values */ + if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | + RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | + RADEON_GEM_USERPTR_REGISTER)) + return -EINVAL; + + if (args->flags & RADEON_GEM_USERPTR_READONLY) { + /* readonly pages not tested on older hardware */ + if (rdev->family < CHIP_R600) + return -EINVAL; + + } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || + !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { + + /* if we want to write to it we must require anonymous + memory and install a MMU notifier */ + return -EACCES; + } + + down_read(&rdev->exclusive_lock); + + /* create a gem object to contain this object in */ + r = radeon_gem_object_create(rdev, args->size, 0, + RADEON_GEM_DOMAIN_CPU, 0, + false, &gobj); + if (r) 
+ goto handle_lockup; + + bo = gem_to_radeon_bo(gobj); + r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags); + if (r) + goto release_object; + + if (args->flags & RADEON_GEM_USERPTR_REGISTER) { + r = radeon_mn_register(bo, args->addr); + if (r) + goto release_object; + } + + if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { + mmap_read_lock(current->mm); + r = radeon_bo_reserve(bo, true); + if (r) { + mmap_read_unlock(current->mm); + goto release_object; + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + radeon_bo_unreserve(bo); + mmap_read_unlock(current->mm); + if (r) + goto release_object; + } + + r = drm_gem_handle_create(filp, gobj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_put(gobj); + if (r) + goto handle_lockup; + + args->handle = handle; + up_read(&rdev->exclusive_lock); + return 0; + +release_object: + drm_gem_object_put(gobj); + +handle_lockup: + up_read(&rdev->exclusive_lock); + r = radeon_gem_handle_lockup(rdev, r); + + return r; +} + int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -287,7 +441,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_set_domain *args = data; struct drm_gem_object *gobj; - struct radeon_bo *robj; int r; /* for now if someone requests domain CPU - @@ -295,18 +448,17 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, down_read(&rdev->exclusive_lock); /* just do a BO wait for now */ - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) { up_read(&rdev->exclusive_lock); return -ENOENT; } - robj = gem_to_radeon_bo(gobj); r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); up_read(&rdev->exclusive_lock); - r = radeon_gem_handle_lockup(robj->rdev, r); + r = radeon_gem_handle_lockup(rdev, r); return r; } @@ -317,13 +469,17 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, struct drm_gem_object *gobj; struct radeon_bo *robj; - gobj = drm_gem_object_lookup(dev, filp, handle); + gobj = drm_gem_object_lookup(filp, handle); if (gobj == NULL) { return -ENOENT; } robj = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) { + drm_gem_object_put(gobj); + return -EPERM; + } *offset_p = radeon_bo_mmap_offset(robj); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); return 0; } @@ -338,33 +494,27 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_busy *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; int r; uint32_t cur_placement = 0; - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) { return -ENOENT; } robj = gem_to_radeon_bo(gobj); - r = radeon_bo_wait(robj, &cur_placement, true); - switch (cur_placement) { - case TTM_PL_VRAM: - args->domain = RADEON_GEM_DOMAIN_VRAM; - break; - case TTM_PL_TT: - args->domain = RADEON_GEM_DOMAIN_GTT; - break; - case TTM_PL_SYSTEM: - args->domain = RADEON_GEM_DOMAIN_CPU; - default: - break; - } - drm_gem_object_unreference_unlocked(gobj); - r = 
radeon_gem_handle_lockup(rdev, r); + + r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ); + if (r == 0) + r = -EBUSY; + else + r = 0; + + cur_placement = READ_ONCE(robj->tbo.resource->mem_type); + args->domain = radeon_mem_type_to_domain(cur_placement); + drm_gem_object_put(gobj); return r; } @@ -375,18 +525,29 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_radeon_gem_wait_idle *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; - int r; + int r = 0; + uint32_t cur_placement = 0; + long ret; - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) { return -ENOENT; } robj = gem_to_radeon_bo(gobj); - r = radeon_bo_wait(robj, NULL, false); - /* callback hw specific functions if any */ - if (rdev->asic->ioctl_wait_idle) - robj->rdev->asic->ioctl_wait_idle(rdev, robj); - drm_gem_object_unreference_unlocked(gobj); + + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, + true, 30 * HZ); + if (ret == 0) + r = -EBUSY; + else if (ret < 0) + r = ret; + + /* Flush HDP cache via MMIO if necessary */ + cur_placement = READ_ONCE(robj->tbo.resource->mem_type); + if (rdev->asic->mmio_hdp_flush && + radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) + robj->rdev->asic->mmio_hdp_flush(rdev); + drm_gem_object_put(gobj); r = radeon_gem_handle_lockup(rdev, r); return r; } @@ -400,12 +561,12 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, int r = 0; DRM_DEBUG("%d \n", args->handle); - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; robj = gem_to_radeon_bo(gobj); r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); return r; } @@ -418,7 +579,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, int r = 0; DRM_DEBUG("\n"); - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; rbo = gem_to_radeon_bo(gobj); @@ -428,10 +589,77 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); radeon_bo_unreserve(rbo); out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); return r; } +/** + * radeon_gem_va_update_vm -update the bo_va in its VM + * + * @rdev: radeon_device pointer + * @bo_va: bo_va to update + * + * Update the bo_va directly after setting it's address. Errors are not + * vital here, so they are not reported back to userspace. 
+ */ +static void radeon_gem_va_update_vm(struct radeon_device *rdev, + struct radeon_bo_va *bo_va) +{ + struct radeon_bo_list *vm_bos, *entry; + struct list_head list; + struct drm_exec exec; + unsigned domain; + int r; + + INIT_LIST_HEAD(&list); + + vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); + if (!vm_bos) + return; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + list_for_each_entry(entry, &list, list) { + r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base, + 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_cleanup; + } + + r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_cleanup; + } + + list_for_each_entry(entry, &list, list) { + domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type); + /* if anything is swapped out don't swap it in here, + just abort and wait for the next CS */ + if (domain == RADEON_GEM_DOMAIN_CPU) + goto error_cleanup; + } + + mutex_lock(&bo_va->vm->mutex); + r = radeon_vm_clear_freed(rdev, bo_va->vm); + if (r) + goto error_unlock; + + if (bo_va->it.start && bo_va->bo) + r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource); + +error_unlock: + mutex_unlock(&bo_va->vm->mutex); + +error_cleanup: + drm_exec_fini(&exec); + kvfree(vm_bos); + + if (r && r != -ERESTARTSYS) + DRM_ERROR("Couldn't update BO_VA (%d)\n", r); +} + int radeon_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -450,7 +678,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, } /* !! DONT REMOVE !! - * We don't support vm_id yet, to be sure we don't have have broken + * We don't support vm_id yet, to be sure we don't have broken * userspace, reject anyone trying to use non 0 value thus moving * forward we can use those fields without breaking existant userspace */ @@ -460,7 +688,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, } if (args->offset < RADEON_VA_RESERVED_SIZE) { - dev_err(&dev->pdev->dev, + dev_err(dev->dev, "offset 0x%lX is in reserved area 0x%X\n", (unsigned long)args->offset, RADEON_VA_RESERVED_SIZE); @@ -474,29 +702,24 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, */ invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM; if ((args->flags & invalid_flags)) { - dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", + dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n", args->flags, invalid_flags); args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL; } - if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) { - dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n"); - args->operation = RADEON_VA_RESULT_ERROR; - return -EINVAL; - } switch (args->operation) { case RADEON_VA_MAP: case RADEON_VA_UNMAP: break; default: - dev_err(&dev->pdev->dev, "unsupported operation %d\n", + dev_err(dev->dev, "unsupported operation %d\n", args->operation); args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL; } - gobj = drm_gem_object_lookup(dev, filp, args->handle); + gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) { args->operation = RADEON_VA_RESULT_ERROR; return -ENOENT; @@ -505,21 +728,23 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, r = radeon_bo_reserve(rbo, false); if (r) { args->operation = RADEON_VA_RESULT_ERROR; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); return r; } bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); if (!bo_va) { args->operation = 
RADEON_VA_RESULT_ERROR; - drm_gem_object_unreference_unlocked(gobj); + radeon_bo_unreserve(rbo); + drm_gem_object_put(gobj); return -ENOENT; } switch (args->operation) { case RADEON_VA_MAP: - if (bo_va->soffset) { + if (bo_va->it.start) { args->operation = RADEON_VA_RESULT_VA_EXIST; - args->offset = bo_va->soffset; + args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; + radeon_bo_unreserve(rbo); goto out; } r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); @@ -530,16 +755,82 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } + if (!r) + radeon_gem_va_update_vm(rdev, bo_va); args->operation = RADEON_VA_RESULT_OK; if (r) { args->operation = RADEON_VA_RESULT_ERROR; } out: - radeon_bo_unreserve(rbo); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); return r; } +int radeon_gem_op_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_op *args = data; + struct drm_gem_object *gobj; + struct radeon_bo *robj; + int r; + + gobj = drm_gem_object_lookup(filp, args->handle); + if (gobj == NULL) { + return -ENOENT; + } + robj = gem_to_radeon_bo(gobj); + + r = -EPERM; + if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) + goto out; + + r = radeon_bo_reserve(robj, false); + if (unlikely(r)) + goto out; + + switch (args->op) { + case RADEON_GEM_OP_GET_INITIAL_DOMAIN: + args->value = robj->initial_domain; + break; + case RADEON_GEM_OP_SET_INITIAL_DOMAIN: + robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | + RADEON_GEM_DOMAIN_GTT | + RADEON_GEM_DOMAIN_CPU); + break; + default: + r = -EINVAL; + } + + radeon_bo_unreserve(robj); +out: + drm_gem_object_put(gobj); + return r; +} + +int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled) +{ + int aligned = width; + int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; + int pitch_mask = 0; + + switch (cpp) { + case 1: + pitch_mask = align_large ? 255 : 127; + break; + case 2: + pitch_mask = align_large ? 127 : 31; + break; + case 3: + case 4: + pitch_mask = align_large ? 
63 : 15; + break; + } + + aligned += pitch_mask; + aligned &= ~pitch_mask; + return aligned * cpp; +} + int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) @@ -549,20 +840,20 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, uint32_t handle; int r; - args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); - args->size = args->pitch * args->height; + args->pitch = radeon_align_pitch(rdev, args->width, + DIV_ROUND_UP(args->bpp, 8), 0); + args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, 0, - RADEON_GEM_DOMAIN_VRAM, - false, ttm_bo_type_device, - &gobj); + RADEON_GEM_DOMAIN_VRAM, 0, + false, &gobj); if (r) return -ENOMEM; r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put(gobj); if (r) { return r; } @@ -570,19 +861,10 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, return 0; } -int radeon_mode_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} - #if defined(CONFIG_DEBUG_FS) -static int radeon_debugfs_gem_info(struct seq_file *m, void *data) +static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; + struct radeon_device *rdev = m->private; struct radeon_bo *rbo; unsigned i = 0; @@ -591,7 +873,7 @@ static int radeon_debugfs_gem_info(struct seq_file *m, void *data) unsigned domain; const char *placement; - domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type); + domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type); switch (domain) { case RADEON_GEM_DOMAIN_VRAM: placement = "VRAM"; @@ -613,15 +895,16 @@ static int radeon_debugfs_gem_info(struct seq_file *m, void *data) return 0; } -static struct drm_info_list radeon_debugfs_gem_list[] = { - {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL}, -}; +DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info); #endif -int radeon_gem_debugfs_init(struct radeon_device *rdev) +void radeon_gem_debugfs_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1); + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; + + debugfs_create_file("radeon_gem_info", 0444, root, rdev, + &radeon_debugfs_gem_info_fops); + #endif - return 0; } |
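
Note: the patch above adds a DRM_RADEON_GEM_USERPTR ioctl. As a reference point, the following is a minimal user-space sketch of how that interface could be driven through libdrm. The struct drm_radeon_gem_userptr layout (addr, size, flags, handle), the command index DRM_RADEON_GEM_USERPTR and the RADEON_GEM_USERPTR_* flags come from the radeon uapi header this patch builds on; the helper name, header paths, flag combination and error handling here are illustrative assumptions, not part of the patch.

/* Hypothetical helper, assuming libdrm with a radeon_drm.h that
 * carries the userptr additions shown in the diff above. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int radeon_userptr_create(int fd, void *cpu_ptr, uint64_t size,
                                 uint32_t *handle)
{
        struct drm_radeon_gem_userptr args;
        int r;

        memset(&args, 0, sizeof(args));
        args.addr = (uintptr_t)cpu_ptr;   /* must be page aligned */
        args.size = size;                 /* must be a multiple of PAGE_SIZE */
        /* Writable mappings must be anonymous memory and must register
         * an MMU notifier, per the flag checks in
         * radeon_gem_userptr_ioctl(); VALIDATE binds the pages up front. */
        args.flags = RADEON_GEM_USERPTR_ANONONLY |
                     RADEON_GEM_USERPTR_REGISTER |
                     RADEON_GEM_USERPTR_VALIDATE;

        r = drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        *handle = args.handle;
        return 0;
}

This mirrors the validation in radeon_gem_userptr_ioctl() above: an unaligned address or size is rejected with -EINVAL, a writable request without ANONONLY and REGISTER fails with -EACCES, and READONLY mappings are additionally refused on pre-R600 hardware.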