author    Chris Wilson <chris@chris-wilson.co.uk>    2016-08-18 17:16:53 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2016-08-18 22:36:47 +0100
commit    e8cb909ac3abbcac5184825638903a2b9a225725 (patch)
tree      a778d7010e0ff371c691ca37dd97c99735f84482 /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent    d50415cc6c8395602052b39a1a39290fba3d313e (diff)
drm/i915: Fallback to single page GTT mmappings for relocations
If we cannot pin the entire object into the mappable region of the GTT,
try to pin a single page instead. This is much more likely to succeed,
and prevents us falling back to the clflush slow path.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-14-chris@chris-wilson.co.uk
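For illustration, the control flow this patch adds can be sketched in
userspace C. Every name below (try_pin_whole, bind_single_page, the
reloc_cache fields) is a hypothetical stand-in, not the kernel API; in
the driver the "slot" is a 4096-byte drm_mm node in the mappable
aperture and the rebind is ggtt->base.insert_page().

/*
 * Sketch: if the whole object cannot be mapped, fall back to one
 * reusable page-sized slot and rebind it per relocation page.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NPAGES    8

static char object[NPAGES * PAGE_SIZE];	/* backing store of the "object" */
static char slot[PAGE_SIZE];		/* reusable single-page window */

struct reloc_cache {
	char *whole;	/* non-NULL when the full mapping succeeded */
	int page;	/* page currently bound to the fallback slot */
	bool fallback;	/* true once we gave up on the full mapping */
};

/* Simulated whole-object pin; fails under (simulated) aperture pressure. */
static char *try_pin_whole(bool aperture_full)
{
	return aperture_full ? NULL : object;
}

/* Simulated single-page binding: copy one page into the fixed slot. */
static char *bind_single_page(struct reloc_cache *c, int page)
{
	if (c->page != page) {
		memcpy(slot, object + page * PAGE_SIZE, PAGE_SIZE);
		c->page = page;
	}
	return slot;
}

static char *reloc_vaddr(struct reloc_cache *c, int page, bool aperture_full)
{
	if (!c->whole && !c->fallback) {
		c->whole = try_pin_whole(aperture_full);
		if (!c->whole)
			c->fallback = true;	/* one page is a smaller ask */
	}
	if (c->whole)
		return c->whole + page * PAGE_SIZE;
	return bind_single_page(c, page);
}

int main(void)
{
	struct reloc_cache c = { .whole = NULL, .page = -1, .fallback = false };

	strcpy(object + 3 * PAGE_SIZE, "relocation target in page 3");
	puts(reloc_vaddr(&c, 3, true));	/* aperture full: takes the fallback */
	return 0;
}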
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  62
1 file changed, 51 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8d0df7d81d8b..c970aabfffa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -331,6 +331,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->vaddr = 0;
 	cache->i915 = i915;
 	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+	cache->node.allocated = false;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -360,8 +361,19 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 
 		kunmap_atomic(vaddr);
 		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
+		wmb();
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		if (cache->node.allocated) {
+			struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+			ggtt->base.clear_range(&ggtt->base,
+					       cache->node.start,
+					       cache->node.size,
+					       true);
+			drm_mm_remove_node(&cache->node);
+		} else {
+			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		}
 	}
 }
@@ -401,8 +413,19 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 int page)
 {
+	struct i915_ggtt *ggtt = &cache->i915->ggtt;
+	unsigned long offset;
 	void *vaddr;
 
+	if (cache->node.allocated) {
+		wmb();
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       cache->node.start, I915_CACHE_NONE, 0);
+		cache->page = page;
+		return unmask_page(cache->vaddr);
+	}
+
 	if (cache->vaddr) {
 		io_mapping_unmap_atomic(unmask_page(cache->vaddr));
 	} else {
@@ -418,21 +441,38 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-		if (IS_ERR(vma))
-			return NULL;
+		if (IS_ERR(vma)) {
+			memset(&cache->node, 0, sizeof(cache->node));
+			ret = drm_mm_insert_node_in_range_generic
+				(&ggtt->base.mm, &cache->node,
+				 4096, 0, 0,
+				 0, ggtt->mappable_end,
+				 DRM_MM_SEARCH_DEFAULT,
+				 DRM_MM_CREATE_DEFAULT);
+			if (ret)
+				return ERR_PTR(ret);
+		} else {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret) {
+				i915_vma_unpin(vma);
+				return ERR_PTR(ret);
+			}
 
-		ret = i915_gem_object_put_fence(obj);
-		if (ret) {
-			i915_vma_unpin(vma);
-			return ERR_PTR(ret);
+			cache->node.start = vma->node.start;
+			cache->node.mm = (void *)vma;
 		}
+	}
 
-		cache->node.start = vma->node.start;
-		cache->node.mm = (void *)vma;
+	offset = cache->node.start;
+	if (cache->node.allocated) {
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       offset, I915_CACHE_NONE, 0);
+	} else {
+		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable,
-					 cache->node.start + (page << PAGE_SHIFT));
+	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
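For orientation, here is the resulting control flow condensed into a
comment block (a paraphrase of the hunks above, not compilable kernel
code):

/*
 * reloc_iomap():
 *   - cache->node.allocated set -> fallback already active: wmb(),
 *     insert_page() the new page's dma address at cache->node.start,
 *     and return the existing mapping via unmask_page(cache->vaddr).
 *   - otherwise try i915_gem_object_ggtt_pin(PIN_MAPPABLE | PIN_NONBLOCK);
 *     if that fails, reserve a 4096-byte node in the mappable aperture
 *     with drm_mm_insert_node_in_range_generic().
 *   - map at node.start; only the pinned whole-object case adds
 *     page << PAGE_SHIFT to the offset.
 *
 * reloc_cache_fini():
 *   - wmb() before unmapping, so write-combined writes are flushed;
 *   - fallback node: clear_range() its PTE, then drm_mm_remove_node();
 *   - pinned vma:    i915_vma_unpin().
 */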