summaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2010-09-16 19:37:04 +0200
committerChris Wilson <chris@chris-wilson.co.uk>2010-10-27 23:31:05 +0100
commit16e809acc167c3ede231cafcdab1be93bab3e429 (patch)
tree2b9878d9d300ea48c1554bc10d821b9230de39b5 /drivers/gpu
parent920afa77ced7124c8bb7d0c4839885618a3b4a54 (diff)
drm/i915: unbind unmappable objects on fault/pin
In i915_gem_object_pin obviously unbind only if mappable is true. This is the last part to enable gtt_mappable_end != gtt_size, which the next patch will do. v2: Fences on g33/pineview only work in the mappable part of the gtt. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
1 file changed, 22 insertions, 3 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef14546fc08d..7b0680714101 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -260,6 +260,16 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static bool
+i915_gem_object_cpu_accessible(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ return obj->gtt_space == NULL ||
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+}
+
static inline int
fast_shmem_read(struct page **pages,
loff_t page_base, int page_offset,
@@ -1255,6 +1265,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
+ if (!i915_gem_object_cpu_accessible(obj_priv))
+ i915_gem_object_unbind(obj);
+
if (!obj_priv->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
@@ -3465,11 +3478,15 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
ret = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
- struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
bool need_fence =
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
+ /* g33/pnv can't fence buffers in the unmappable part */
+ bool need_mappable =
+ entry->relocation_count ? true : need_fence;
+
/* Check fence reg constraints and rebind if necessary */
if (need_fence &&
!i915_gem_object_fence_offset_ok(&obj->base,
@@ -3480,7 +3497,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
}
ret = i915_gem_object_pin(&obj->base,
- entry->alignment, true);
+ entry->alignment,
+ need_mappable);
if (ret)
break;
@@ -4064,7 +4082,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
if (obj_priv->gtt_space != NULL) {
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
- if (obj_priv->gtt_offset & (alignment - 1)) {
+ if (obj_priv->gtt_offset & (alignment - 1) ||
+ (mappable && !i915_gem_object_cpu_accessible(obj_priv))) {
WARN(obj_priv->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x\n",