Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
 -rw-r--r--   drivers/gpu/drm/radeon/radeon_object.c   517
 1 file changed, 344 insertions, 173 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0219d263e2df..a0fc0801abb0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -29,16 +29,19 @@
  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  * Dave Airlie
  */
+
+#include <linux/io.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_cache.h>
+#include <drm/drm_prime.h>
 #include <drm/radeon_drm.h>
+
 #include "radeon.h"
 #include "radeon_trace.h"
+#include "radeon_ttm.h"
 
-
-int radeon_ttm_init(struct radeon_device *rdev);
-void radeon_ttm_fini(struct radeon_device *rdev);
 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 
 /*
@@ -46,27 +49,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
 
-void radeon_bo_clear_va(struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va, *tmp;
-
-	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
-		/* remove from all vm address space */
-		radeon_vm_bo_rmv(bo->rdev, bo_va);
-	}
-}
-
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct radeon_bo *bo;
 
 	bo = container_of(tbo, struct radeon_bo, tbo);
+
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
-	radeon_bo_clear_va(bo);
-	drm_gem_object_release(&bo->gem_base);
+	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->tbo.base.import_attach)
+		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+	drm_gem_object_release(&bo->tbo.base);
 	kfree(bo);
 }
 
@@ -79,48 +75,69 @@ bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
 
 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
-	u32 c = 0;
+	u32 c = 0, i;
 
-	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
-	rbo->placement.busy_placement = rbo->placements;
-	if (domain & RADEON_GEM_DOMAIN_VRAM)
-		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-					TTM_PL_FLAG_VRAM;
-	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
-		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+	if (domain & RADEON_GEM_DOMAIN_VRAM) {
+		/* Try placing BOs which don't need CPU access outside of the
+		 * CPU accessible part of VRAM
+		 */
+		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+			rbo->placements[c].fpfn =
+				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			rbo->placements[c].mem_type = TTM_PL_VRAM;
+			rbo->placements[c++].flags = 0;
 		}
+
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_VRAM;
+		rbo->placements[c++].flags = 0;
 	}
+
+	if (domain & RADEON_GEM_DOMAIN_GTT) {
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_TT;
+		rbo->placements[c++].flags = 0;
+	}
+
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
-		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
-		}
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		rbo->placements[c++].flags = 0;
+	}
+	if (!c) {
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		rbo->placements[c++].flags = 0;
 	}
-	if (!c)
-		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
 	rbo->placement.num_placement = c;
-	rbo->placement.num_busy_placement = c;
+
+	for (i = 0; i < c; ++i) {
+		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
+		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
+		    !rbo->placements[i].fpfn)
+			rbo->placements[i].lpfn =
+				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			rbo->placements[i].lpfn = 0;
+	}
 }
 
 int radeon_bo_create(struct radeon_device *rdev,
-		     unsigned long size, int byte_align, bool kernel, u32 domain,
-		     struct sg_table *sg, struct radeon_bo **bo_ptr)
+		     unsigned long size, int byte_align, bool kernel,
+		     u32 domain, u32 flags, struct sg_table *sg,
+		     struct dma_resv *resv,
+		     struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-	size_t acc_size;
 	int r;
 
 	size = ALIGN(size, PAGE_SIZE);
 
-	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 	if (kernel) {
 		type = ttm_bo_type_kernel;
 	} else if (sg) {
@@ -130,28 +147,63 @@ int radeon_bo_create(struct radeon_device *rdev,
 	}
 	*bo_ptr = NULL;
 
-	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
-				       sizeof(struct radeon_bo));
-
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
-	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
-	if (unlikely(r)) {
-		kfree(bo);
-		return r;
-	}
+	drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
+	bo->tbo.base.funcs = &radeon_gem_object_funcs;
 	bo->rdev = rdev;
-	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
+	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
+				       RADEON_GEM_DOMAIN_GTT |
+				       RADEON_GEM_DOMAIN_CPU);
+
+	bo->flags = flags;
+	/* PCI GART is always snooped */
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
+#ifdef CONFIG_X86_32
+	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+	 */
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+	/* Don't try to enable write-combining when it can't work, or things
+	 * may be slow
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+	 */
+#ifndef CONFIG_COMPILE_TEST
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+	 thanks to write-combining
+#endif
+
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+	/* For architectures that don't support WC memory,
+	 * mask out the WC flag from the BO
+	 */
+	if (!drm_arch_can_wc_memory())
+		bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
-	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, &radeon_ttm_bo_destroy);
+	r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
+				 &bo->placement, page_align, !kernel, sg, resv,
				 &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
@@ -166,7 +218,12 @@ int radeon_bo_create(struct radeon_device *rdev,
 int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
 	bool is_iomem;
-	int r;
+	long r;
+
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+				  false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
 
 	if (bo->kptr) {
 		if (ptr) {
@@ -174,7 +231,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -195,29 +252,34 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
 	ttm_bo_kunmap(&bo->kmap);
 }
 
-void radeon_bo_unref(struct radeon_bo **bo)
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 {
-	struct ttm_buffer_object *tbo;
-	struct radeon_device *rdev;
+	if (bo == NULL)
+		return NULL;
+
+	drm_gem_object_get(&bo->tbo.base);
+	return bo;
+}
 
+void radeon_bo_unref(struct radeon_bo **bo)
+{
 	if ((*bo) == NULL)
 		return;
-	rdev = (*bo)->rdev;
-	tbo = &((*bo)->tbo);
-	down_read(&rdev->pm.mclk_lock);
-	ttm_bo_unref(&tbo);
-	up_read(&rdev->pm.mclk_lock);
-	if (tbo == NULL)
-		*bo = NULL;
+	drm_gem_object_put(&(*bo)->tbo.base);
+	*bo = NULL;
 }
 
 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 			     u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
-	if (bo->pin_count) {
-		bo->pin_count++;
+	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
+		return -EPERM;
+
+	if (bo->tbo.pin_count) {
+		ttm_bo_pin(&bo->tbo);
 		if (gpu_addr)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 
@@ -234,30 +296,35 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 		return 0;
 	}
 
+	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+		return -EINVAL;
+	}
+
 	radeon_ttm_placement_from_domain(bo, domain);
-	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
-		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
-	}
-	if (max_offset) {
-		u64 lpfn = max_offset >> PAGE_SHIFT;
-
-		if (!bo->placement.lpfn)
-			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
-
-		if (lpfn < bo->placement.lpfn)
-			bo->placement.lpfn = lpfn;
+		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
+		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
+			bo->placements[i].lpfn =
+				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
 	}
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
-		bo->pin_count = 1;
+		ttm_bo_pin(&bo->tbo);
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
-	}
-	if (unlikely(r != 0))
+		if (domain == RADEON_GEM_DOMAIN_VRAM)
+			bo->rdev->vram_pin_size += radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size += radeon_bo_size(bo);
+	} else {
 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+	}
 	return r;
 }
 
@@ -266,34 +333,34 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
 }
 
-int radeon_bo_unpin(struct radeon_bo *bo)
+void radeon_bo_unpin(struct radeon_bo *bo)
 {
-	int r, i;
-
-	if (!bo->pin_count) {
-		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
-		return 0;
+	ttm_bo_unpin(&bo->tbo);
+	if (!bo->tbo.pin_count) {
+		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
+			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
 	}
-	bo->pin_count--;
-	if (bo->pin_count)
-		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (unlikely(r != 0))
-		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
-	return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
+	struct ttm_device *bdev = &rdev->mman.bdev;
+	struct ttm_resource_manager *man;
+
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-	if (0 && (rdev->flags & RADEON_IS_IGP)) {
+#ifndef CONFIG_HIBERNATION
+	if (rdev->flags & RADEON_IS_IGP) {
 		if (rdev->mc.igp_sideport_enabled == false)
 			/* Useless to evict on IGP chips */
 			return 0;
 	}
-	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+#endif
+	man = ttm_manager_type(bdev, TTM_PL_VRAM);
+	if (!man)
+		return 0;
+	return ttm_resource_manager_evict_all(bdev, man);
 }
 
 void radeon_bo_force_delete(struct radeon_device *rdev)
@@ -305,21 +372,23 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 	}
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		mutex_lock(&rdev->ddev->struct_mutex);
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-			*((unsigned long *)&bo->gem_base.refcount));
+			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+			*((unsigned long *)&bo->tbo.base.refcount));
 		mutex_lock(&bo->rdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&rdev->ddev->struct_mutex);
+		drm_gem_object_put(&bo->tbo.base);
 	}
 }
 
 int radeon_bo_init(struct radeon_device *rdev)
 {
+	/* reserve PAT memory space to WC for VRAM */
+	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+				   rdev->mc.aper_size);
+
 	/* Add an MTRR for the VRAM */
 	if (!rdev->fastfb_working) {
 		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -337,44 +406,120 @@ void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
 	arch_phys_wc_del(rdev->mc.vram_mtrr);
+	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
 }
 
-void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
-				struct list_head *head)
+/* Returns how many bytes TTM can move per IB.
+ */
+static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
 {
-	if (lobj->written) {
-		list_add(&lobj->tv.head, head);
-	} else {
-		list_add_tail(&lobj->tv.head, head);
-	}
+	u64 real_vram_size = rdev->mc.real_vram_size;
+	struct ttm_resource_manager *man =
+		ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
+	u64 vram_usage = ttm_resource_manager_usage(man);
+
+	/* This function is based on the current VRAM usage.
+	 *
+	 * - If all of VRAM is free, allow relocating the number of bytes that
+	 *   is equal to 1/4 of the size of VRAM for this IB.

+	 * - If more than one half of VRAM is occupied, only allow relocating
+	 *   1 MB of data for this IB.
+	 *
+	 * - From 0 to one half of used VRAM, the threshold decreases
+	 *   linearly.
+	 *         __________________
+	 * 1/4 of -|\               |
+	 * VRAM    | \              |
+	 *         |  \             |
+	 *         |   \            |
+	 *         |    \           |
+	 *         |     \          |
+	 *         |      \         |
+	 *         |       \________|1 MB
+	 *         |----------------|
+	 *    VRAM 0 %             100 %
+	 *         used            used
+	 *
+	 * Note: It's a threshold, not a limit. The threshold must be crossed
+	 * for buffer relocations to stop, so any buffer of an arbitrary size
+	 * can be moved as long as the threshold isn't crossed before
+	 * the relocation takes place. We don't want to disable buffer
+	 * relocations completely.
+	 *
+	 * The idea is that buffers should be placed in VRAM at creation time
+	 * and TTM should only do a minimum number of relocations during
+	 * command submission. In practice, you need to submit at least
+	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
+	 *
+	 * Also, things can get pretty crazy under memory pressure and actual
+	 * VRAM usage can change a lot, so playing safe even at 50% does
+	 * consistently increase performance.
+	 */
+
+	u64 half_vram = real_vram_size >> 1;
+	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
+	u64 bytes_moved_threshold = half_free_vram >> 1;
+	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
-int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+int radeon_bo_list_validate(struct radeon_device *rdev,
+			    struct drm_exec *exec,
 			    struct list_head *head, int ring)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	u32 domain;
+	u64 bytes_moved = 0, initial_bytes_moved;
+	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 	int r;
 
-	r = ttm_eu_reserve_buffers(ticket, head);
-	if (unlikely(r != 0)) {
-		return r;
+	drm_exec_until_all_locked(exec) {
+		list_for_each_entry(lobj, head, list) {
+			r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
+						 1);
+			drm_exec_retry_on_contention(exec);
+			if (unlikely(r && r != -EALREADY))
+				return r;
+		}
 	}
-	list_for_each_entry(lobj, head, tv.head) {
-		bo = lobj->bo;
-		if (!bo->pin_count) {
-			domain = lobj->domain;
-
+
+	list_for_each_entry(lobj, head, list) {
+		struct radeon_bo *bo = lobj->robj;
+		if (!bo->tbo.pin_count) {
+			u32 domain = lobj->preferred_domains;
+			u32 allowed = lobj->allowed_domains;
+			u32 current_domain =
+				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
+
+			/* Check if this buffer will be moved and don't move it
+			 * if we have moved too many buffers for this IB already.
+			 *
+			 * Note that this allows moving at least one buffer of
+			 * any size, because it doesn't take the current "bo"
+			 * into account. We don't want to disallow buffer moves
+			 * completely.
+			 */
+			if ((allowed & current_domain) != 0 &&
+			    (domain & current_domain) == 0 && /* will be moved */
+			    bytes_moved > bytes_moved_threshold) {
+				/* don't move it */
+				domain = current_domain;
+			}
+
 retry:
 			radeon_ttm_placement_from_domain(bo, domain);
 			if (ring == R600_RING_TYPE_UVD_INDEX)
-				radeon_uvd_force_into_uvd_segment(bo);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement,
-					    true, false);
+				radeon_uvd_force_into_uvd_segment(bo, allowed);
+
+			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
+				       initial_bytes_moved;
+
 			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
-					domain = lobj->alt_domain;
+				if (r != -ERESTARTSYS &&
+				    domain != lobj->allowed_domains) {
+					domain = lobj->allowed_domains;
 					goto retry;
 				}
 				return r;
@@ -383,13 +528,8 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
 	}
-	return 0;
-}
 
-int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
-			 struct vm_area_struct *vma)
-{
-	return ttm_fbdev_mmap(vma, &bo->tbo);
+	return 0;
 }
 
 int radeon_bo_get_surface_reg(struct radeon_bo *bo)
@@ -400,13 +540,12 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 	int steal;
 	int i;
 
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!bo->tiling_flags)
 		return 0;
 
 	if (bo->surface_reg >= 0) {
-		reg = &rdev->surface_regs[bo->surface_reg];
 		i = bo->surface_reg;
 		goto out;
 	}
@@ -419,7 +558,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 			break;
 
 		old_object = reg->bo;
-		if (old_object->pin_count == 0)
+		if (old_object->tbo.pin_count == 0)
 			steal = i;
 	}
 
@@ -442,8 +581,8 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.start << PAGE_SHIFT,
-			       bo->tbo.num_pages << PAGE_SHIFT);
+			       bo->tbo.resource->start << PAGE_SHIFT,
+			       bo->tbo.base.size);
 	return 0;
 }
 
@@ -526,7 +665,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -538,7 +677,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
 	if (!force_drop)
-		lockdep_assert_held(&bo->tbo.resv->lock.base);
+		dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -548,7 +687,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 		return 0;
 	}
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
@@ -563,61 +702,93 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 	return radeon_bo_get_surface_reg(bo);
 }
 
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem)
+void radeon_bo_move_notify(struct ttm_buffer_object *bo)
 {
 	struct radeon_bo *rbo;
+
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
 		return;
+
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 1);
 	radeon_vm_bo_invalidate(rbo->rdev, rbo);
 }
 
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
-	unsigned long offset, size;
-	int r;
+	unsigned long offset, size, lpfn;
+	int i, r;
 
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
 		return 0;
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
-		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.start << PAGE_SHIFT;
-		if ((offset + size) > rdev->mc.visible_vram_size) {
-			/* hurrah the memory is not visible ! */
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			r = ttm_bo_validate(bo, &rbo->placement, false, false);
-			if (unlikely(r != 0))
-				return r;
-			offset = bo->mem.start << PAGE_SHIFT;
-			/* this should not happen */
-			if ((offset + size) > rdev->mc.visible_vram_size)
-				return -EINVAL;
-		}
+	if (bo->resource->mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->resource->size;
+	offset = bo->resource->start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* Can't move a pinned BO to visible VRAM */
+	if (rbo->tbo.pin_count > 0)
+		return VM_FAULT_SIGBUS;
+
+	/* hurrah the memory is not visible ! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	for (i = 0; i < rbo->placement.num_placement; i++) {
+		/* Force into visible VRAM */
+		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
+		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+			rbo->placements[i].lpfn = lpfn;
+	}
+	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	} else if (likely(!r)) {
+		offset = bo->resource->start << PAGE_SHIFT;
+		/* this should never happen */
+		if ((offset + size) > rdev->mc.visible_vram_size)
+			return VM_FAULT_SIGBUS;
 	}
+
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
+
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
 
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
+/**
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
+ *
+ */
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+		     bool shared)
 {
+	struct dma_resv *resv = bo->tbo.base.resv;
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
-	if (unlikely(r != 0))
-		return r;
-	spin_lock(&bo->tbo.bdev->fence_lock);
-	if (mem_type)
-		*mem_type = bo->tbo.mem.mem_type;
-	if (bo->tbo.sync_obj)
-		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.bdev->fence_lock);
-	ttm_bo_unreserve(&bo->tbo);
-	return r;
+	r = dma_resv_reserve_fences(resv, 1);
+	if (r) {
+		/* As last resort on OOM we block for the fence */
+		dma_fence_wait(&fence->base, false);
+		return;
+	}
+
+	dma_resv_add_fence(resv, &fence->base, shared ?
+			   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
}
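
The radeon_ttm_placement_from_domain() change above replaces one packed TTM_PL_FLAG_* word per placement with explicit {fpfn, lpfn, mem_type} entries. A minimal userspace model of that logic (a simplified stand-in for struct ttm_place and hypothetical VRAM sizes, not driver code) shows how a RADEON_GEM_NO_CPU_ACCESS buffer ends up with a preferred placement above the CPU-visible window plus a whole-VRAM fallback:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Simplified stand-in for struct ttm_place; only the fields this
 * sketch needs. fpfn/lpfn are page frame bounds, lpfn == 0 meaning
 * "no upper limit".
 */
struct place {
	unsigned int fpfn;
	unsigned int lpfn;
	const char *mem_type;
};

int main(void)
{
	const uint64_t visible_vram = 256ull << 20;	/* hypothetical 256 MB window */
	const uint64_t real_vram = 1024ull << 20;	/* hypothetical 1 GB of VRAM */
	int no_cpu_access = 1;		/* BO created with RADEON_GEM_NO_CPU_ACCESS */
	struct place p[2];
	unsigned int c = 0, i;

	if (no_cpu_access && visible_vram < real_vram) {
		/* preferred: invisible VRAM, above the CPU-visible window */
		p[c].fpfn = visible_vram >> PAGE_SHIFT;
		p[c].lpfn = 0;
		p[c++].mem_type = "VRAM";
	}
	/* fallback: anywhere in VRAM */
	p[c].fpfn = 0;
	p[c].lpfn = 0;
	p[c++].mem_type = "VRAM";

	for (i = 0; i < c; i++)
		printf("placement %u: %s, fpfn=%u, lpfn=%u\n",
		       i, p[i].mem_type, p[i].fpfn, p[i].lpfn);
	return 0;
}
```

Keeping the unrestricted VRAM entry after the restricted one is what lets TTM fall back gracefully when the invisible half is full.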
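radeon_bo_create() now sanitizes the caller's GTT caching flags through a chain of independent conditions (PCI GART snooping, the RV6xx write-combine hang, 32-bit x86, missing PAT, architectures without WC support). Since each condition only ever clears bits, the chain condenses into one pure function; the sketch below uses hypothetical bit values and a made-up helper name (the real flags live in radeon_drm.h, and the compile-time #ifdef cases are folded into the arch_can_wc parameter):

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical bit values for this sketch; the real definitions
 * are in the radeon_drm.h UAPI header.
 */
#define GEM_GTT_UC (1u << 0)
#define GEM_GTT_WC (1u << 1)

/* Condensed model of the flag masking in radeon_bo_create():
 * each condition independently strips the WC/UC hints.
 */
static uint32_t sanitize_gtt_flags(uint32_t flags, int is_pcie,
				   int is_rv6xx, int arch_can_wc)
{
	if (!is_pcie)		/* PCI GART is always snooped */
		flags &= ~(GEM_GTT_WC | GEM_GTT_UC);
	if (is_rv6xx)		/* WC GTT mappings hang RV6xx (fdo bug 91268) */
		flags &= ~(GEM_GTT_WC | GEM_GTT_UC);
	if (!arch_can_wc)	/* drm_arch_can_wc_memory() says no */
		flags &= ~GEM_GTT_WC;
	return flags;
}

int main(void)
{
	/* PCIe board, not RV6xx, WC-capable arch: the hint survives */
	uint32_t f = sanitize_gtt_flags(GEM_GTT_WC, 1, 0, 1);

	printf("effective flags: %#x (WC %s)\n",
	       f, (f & GEM_GTT_WC) ? "kept" : "dropped");
	return 0;
}
```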
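The relocation threshold in radeon_bo_get_threshold_for_moves() reduces to max(V/4 - u/2, 1 MB) for VRAM size V and usage u up to V/2, and a flat 1 MB once u passes V/2. A standalone reproduction of that arithmetic (hypothetical 1 GB board; same computation as the driver, but not driver code):

```c
#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as radeon_bo_get_threshold_for_moves(): 1/4 of
 * VRAM when nothing is used, decreasing linearly to a 1 MB floor
 * once half of VRAM is occupied.
 */
static uint64_t threshold_for_moves(uint64_t real_vram_size, uint64_t vram_usage)
{
	uint64_t half_vram = real_vram_size >> 1;
	uint64_t half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	uint64_t bytes_moved_threshold = half_free_vram >> 1;

	return bytes_moved_threshold > (1024 * 1024ull) ?
	       bytes_moved_threshold : (1024 * 1024ull);
}

int main(void)
{
	const uint64_t vram = 1024ull << 20;	/* hypothetical 1 GB of VRAM */
	uint64_t usage;

	/* Prints 256 MB at 0% usage, 128 MB at 25%, then the 1 MB
	 * floor from 50% onward -- the curve drawn in the comment
	 * block of the patch.
	 */
	for (usage = 0; usage <= vram; usage += vram / 4)
		printf("usage %4llu MB -> threshold %3llu MB\n",
		       (unsigned long long)(usage >> 20),
		       (unsigned long long)(threshold_for_moves(vram, usage) >> 20));
	return 0;
}
```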
