Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 934
1 file changed, 691 insertions, 243 deletions
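Orientation note: the first functional hunk below replaces the hard-coded 8 bytes of tail headroom in ring_space() with the named I915_RING_FREE_SPACE constant. The following is a minimal, self-contained sketch of that circular-buffer free-space computation, not the driver's code; RING_FREE_HEADROOM and the function name are illustrative stand-ins, and the real driver additionally masks the head with HEAD_ADDR.

    #include <stdio.h>

    #define RING_FREE_HEADROOM 64  /* hypothetical stand-in for I915_RING_FREE_SPACE */

    static int ring_space_example(unsigned int head, unsigned int tail,
                                  unsigned int size)
    {
            /* Bytes the CPU may still write before the tail would catch up
             * with the hardware's head pointer, minus a safety margin. */
            int space = (int)head - (int)(tail + RING_FREE_HEADROOM);

            if (space < 0)
                    space += size;  /* head is behind us; the buffer wraps */
            return space;
    }

    int main(void)
    {
            /* 32 KiB ring, head at 0x100, tail at 0x7f00: nearly full. */
            printf("%d bytes free\n",
                   ring_space_example(0x100, 0x7f00, 32 * 1024));
            return 0;
    }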
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e2a73b38abe9..460ee1026fca 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -27,26 +27,15 @@ * */ -#include "drmP.h" -#include "drm.h" +#include <drm/drmP.h> #include "i915_drv.h" -#include "i915_drm.h" +#include <drm/i915_drm.h> #include "i915_trace.h" #include "intel_drv.h" -/* - * 965+ support PIPE_CONTROL commands, which provide finer grained control - * over cache flushing. - */ -struct pipe_control { - struct drm_i915_gem_object *obj; - volatile u32 *cpu_page; - u32 gtt_offset; -}; - static inline int ring_space(struct intel_ring_buffer *ring) { - int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); + int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); if (space < 0) space += ring->size; return space; @@ -176,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring, static int intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) { - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; @@ -214,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ @@ -246,7 +233,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, /* * TLB invalidate requires a post-sync write. */ - flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; } ret = intel_ring_begin(ring, 4); @@ -262,6 +249,107 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, return 0; } +static int +gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) +{ + int ret; + + if (!ring->fbc_dirty) + return 0; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + intel_ring_emit(ring, MI_NOOP); + /* WaFbcNukeOn3DBlt:ivb/hsw */ + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, MSG_FBC_REND_STATE); + intel_ring_emit(ring, value); + intel_ring_advance(ring); + + ring->fbc_dirty = false; + return 0; +} + +static int +gen7_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, u32 flush_domains) +{ + u32 flags = 0; + u32 scratch_addr = ring->scratch.gtt_offset + 128; + int ret; + + /* + * Ensure that any following seqno writes only happen when the render + * cache is indeed flushed. + * + * Workaround: 4th PIPE_CONTROL command (except the ones with only + * read-cache invalidate bits set) must have the CS_STALL bit set. We + * don't try to be clever and just set it unconditionally. + */ + flags |= PIPE_CONTROL_CS_STALL; + + /* Just flush everything. Experiments have shown that reducing the + * number of bits based on the write domains has little performance + * impact. 
+ */ + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + + /* Workaround: we must issue a pipe_control with CS-stall bit + * set before a pipe_control command that has the state cache + * invalidate bit set. */ + gen7_render_ring_cs_stall_wa(ring); + } + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + if (flush_domains) + return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); + + return 0; +} + static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { @@ -278,6 +366,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) return I915_READ(acthd_reg); } +static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + u32 addr; + + addr = dev_priv->status_page_dmah->busaddr; + if (INTEL_INFO(ring->dev)->gen >= 4) + addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); +} + static int init_ring_common(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -289,6 +388,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) if (HAS_FORCE_WAKE(dev)) gen6_gt_force_wake_get(dev_priv); + if (I915_NEED_GFX_HWS(dev)) + intel_ring_setup_status_page(ring); + else + ring_setup_phys_status_page(ring); + /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); @@ -323,14 +427,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. 
*/ - I915_WRITE_START(ring, obj->gtt_offset); + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && - I915_READ_START(ring) == obj->gtt_offset && + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", @@ -352,6 +456,8 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->last_retired_head = -1; } + memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); + out: if (HAS_FORCE_WAKE(dev)) gen6_gt_force_wake_put(dev_priv); @@ -362,79 +468,70 @@ out: static int init_pipe_control(struct intel_ring_buffer *ring) { - struct pipe_control *pc; - struct drm_i915_gem_object *obj; int ret; - if (ring->private) + if (ring->scratch.obj) return 0; - pc = kmalloc(sizeof(*pc), GFP_KERNEL); - if (!pc) - return -ENOMEM; - - obj = i915_gem_alloc_object(ring->dev, 4096); - if (obj == NULL) { + ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); + if (ring->scratch.obj == NULL) { DRM_ERROR("Failed to allocate seqno page\n"); ret = -ENOMEM; goto err; } - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true); + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); if (ret) goto err_unref; - pc->gtt_offset = obj->gtt_offset; - pc->cpu_page = kmap(obj->pages[0]); - if (pc->cpu_page == NULL) + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); + ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); + if (ring->scratch.cpu_page == NULL) { + ret = -ENOMEM; goto err_unpin; + } - pc->obj = obj; - ring->private = pc; + DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", + ring->name, ring->scratch.gtt_offset); return 0; err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin(ring->scratch.obj); err_unref: - drm_gem_object_unreference(&obj->base); + drm_gem_object_unreference(&ring->scratch.obj->base); err: - kfree(pc); return ret; } -static void -cleanup_pipe_control(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc = ring->private; - struct drm_i915_gem_object *obj; - - if (!ring->private) - return; - - obj = pc->obj; - kunmap(obj->pages[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - - kfree(pc); - ring->private = NULL; -} - static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); - if (INTEL_INFO(dev)->gen > 3) { + if (INTEL_INFO(dev)->gen > 3) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); - if (IS_GEN7(dev)) - I915_WRITE(GFX_MODE_GEN7, - _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | - _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - } + + /* We need to disable the AsyncFlip performance optimisations in order + * to use MI_WAIT_FOR_EVENT within the CS. It should already be + * programmed to '1' on all products. 
+ * + * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv + */ + if (INTEL_INFO(dev)->gen >= 6) + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); + + /* Required for the hardware to program scanline values for waiting */ + if (INTEL_INFO(dev)->gen == 6) + I915_WRITE(GFX_MODE, + _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); + + if (IS_GEN7(dev)) + I915_WRITE(GFX_MODE_GEN7, + _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | + _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); if (INTEL_INFO(dev)->gen >= 5) { ret = init_pipe_control(ring); @@ -462,31 +559,42 @@ static int init_render_ring(struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 6) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (IS_IVYBRIDGE(dev)) - I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); + if (HAS_L3_GPU_CACHE(dev)) + I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); return ret; } static void render_ring_cleanup(struct intel_ring_buffer *ring) { - if (!ring->private) + struct drm_device *dev = ring->dev; + + if (ring->scratch.obj == NULL) return; - cleanup_pipe_control(ring); + if (INTEL_INFO(dev)->gen >= 5) { + kunmap(sg_page(ring->scratch.obj->pages->sgl)); + i915_gem_object_unpin(ring->scratch.obj); + } + + drm_gem_object_unreference(&ring->scratch.obj->base); + ring->scratch.obj = NULL; } static void update_mboxes(struct intel_ring_buffer *ring, - u32 seqno, - u32 mmio_offset) -{ - intel_ring_emit(ring, MI_SEMAPHORE_MBOX | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_REGISTER | - MI_SEMAPHORE_UPDATE); - intel_ring_emit(ring, seqno); + u32 mmio_offset) +{ +/* NB: In order to be able to do semaphore MBOX updates for varying number + * of rings, it's easiest if we round up each individual update to a + * multiple of 2 (since ring updates must always be a multiple of 2) + * even though the actual update only requires 3 dwords. + */ +#define MBOX_UPDATE_DWORDS 4 + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, mmio_offset); + intel_ring_emit(ring, ring->outstanding_lazy_request); + intel_ring_emit(ring, MI_NOOP); } /** @@ -499,33 +607,42 @@ update_mboxes(struct intel_ring_buffer *ring, * This acts like a signal in the canonical semaphore. 
*/ static int -gen6_add_request(struct intel_ring_buffer *ring, - u32 *seqno) +gen6_add_request(struct intel_ring_buffer *ring) { - u32 mbox1_reg; - u32 mbox2_reg; - int ret; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *useless; + int i, ret; - ret = intel_ring_begin(ring, 10); + ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) * + MBOX_UPDATE_DWORDS) + + 4); if (ret) return ret; +#undef MBOX_UPDATE_DWORDS - mbox1_reg = ring->signal_mbox[0]; - mbox2_reg = ring->signal_mbox[1]; - - *seqno = i915_gem_next_request_seqno(ring); + for_each_ring(useless, dev_priv, i) { + u32 mbox_reg = ring->signal_mbox[i]; + if (mbox_reg != GEN6_NOSYNC) + update_mboxes(ring, mbox_reg); + } - update_mboxes(ring, *seqno, mbox1_reg); - update_mboxes(ring, *seqno, mbox2_reg); intel_ring_emit(ring, MI_STORE_DWORD_INDEX); intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, *seqno); + intel_ring_emit(ring, ring->outstanding_lazy_request); intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); return 0; } +static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, + u32 seqno) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->last_seqno < seqno; +} + /** * intel_ring_sync - sync the waiter to the signaller on seqno * @@ -556,11 +673,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter, if (ret) return ret; - intel_ring_emit(waiter, - dw1 | signaller->semaphore_register[waiter->id]); - intel_ring_emit(waiter, seqno); - intel_ring_emit(waiter, 0); - intel_ring_emit(waiter, MI_NOOP); + /* If seqno wrap happened, omit the wait with no-ops */ + if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + intel_ring_emit(waiter, + dw1 | + signaller->semaphore_register[waiter->id]); + intel_ring_emit(waiter, seqno); + intel_ring_emit(waiter, 0); + intel_ring_emit(waiter, MI_NOOP); + } else { + intel_ring_emit(waiter, MI_NOOP); + intel_ring_emit(waiter, MI_NOOP); + intel_ring_emit(waiter, MI_NOOP); + intel_ring_emit(waiter, MI_NOOP); + } intel_ring_advance(waiter); return 0; @@ -576,12 +702,9 @@ do { \ } while (0) static int -pc_render_add_request(struct intel_ring_buffer *ring, - u32 *result) +pc_render_add_request(struct intel_ring_buffer *ring) { - u32 seqno = i915_gem_next_request_seqno(ring); - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently @@ -599,8 +722,8 @@ pc_render_add_request(struct intel_ring_buffer *ring, intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, ring->outstanding_lazy_request); intel_ring_emit(ring, 0); PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; /* write to separate cachelines */ @@ -618,39 +741,47 @@ pc_render_add_request(struct intel_ring_buffer *ring, PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 
ring->outstanding_lazy_request); intel_ring_emit(ring, 0); intel_ring_advance(ring); - *result = seqno; return 0; } static u32 -gen6_ring_get_seqno(struct intel_ring_buffer *ring) +gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) { - struct drm_device *dev = ring->dev; - /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. */ - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (!lazy_coherency) intel_ring_get_active_head(ring); return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static u32 -ring_get_seqno(struct intel_ring_buffer *ring) +ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } +static void +ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); +} + static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring) +pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +{ + return ring->scratch.cpu_page[0]; +} + +static void +pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) { - struct pipe_control *pc = ring->private; - return pc->cpu_page[0]; + ring->scratch.cpu_page[0] = seqno; } static bool @@ -664,11 +795,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); - } + if (ring->irq_refcount++ == 0) + ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; @@ -682,11 +810,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - dev_priv->gt_irq_mask |= ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); - } + if (--ring->irq_refcount == 0) + ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -784,6 +909,9 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) case VCS: mmio = BSD_HWS_PGA_GEN7; break; + case VECS: + mmio = VEBOX_HWS_PGA_GEN7; + break; } } else if (IS_GEN6(ring->dev)) { mmio = RING_HWS_PGA_GEN6(ring->mmio_base); @@ -793,6 +921,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); POSTING_READ(mmio); + + /* Flush the TLB for this page */ + if (INTEL_INFO(dev)->gen >= 6) { + u32 reg = RING_INSTPM(ring->mmio_base); + I915_WRITE(reg, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, + 1000)) + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + ring->name); + } } static int @@ -813,25 +953,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring, } static int -i9xx_add_request(struct intel_ring_buffer *ring, - u32 *result) +i9xx_add_request(struct intel_ring_buffer *ring) { - u32 seqno; int ret; ret = intel_ring_begin(ring, 4); if (ret) return ret; - seqno = i915_gem_next_request_seqno(ring); - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); + intel_ring_emit(ring, ring->outstanding_lazy_request); 
intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); - *result = seqno; return 0; } @@ -852,14 +987,13 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { - if (IS_IVYBRIDGE(dev) && ring->id == RCS) - I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | - GEN6_RENDER_L3_PARITY_ERROR)); + if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) + I915_WRITE_IMR(ring, + ~(ring->irq_enable_mask | + GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); else I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -875,21 +1009,60 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { - if (IS_IVYBRIDGE(dev) && ring->id == RCS) - I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); + if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) + I915_WRITE_IMR(ring, + ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); else I915_WRITE_IMR(ring, ~0); - dev_priv->gt_irq_mask |= ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); gen6_gt_force_wake_put(dev_priv); } +static bool +hsw_vebox_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void +hsw_vebox_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + I915_WRITE_IMR(ring, ~0); + snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} + static int -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 length, + unsigned flags) { int ret; @@ -900,35 +1073,70 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT | - MI_BATCH_NON_SECURE_I965); + (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_I965)); intel_ring_emit(ring, offset); intel_ring_advance(ring); return 0; } +/* Just userspace ABI convention to limit the wa batch bo to a resonable size */ +#define I830_BATCH_LIMIT (256*1024) static int i830_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len) + u32 offset, u32 len, + unsigned flags) { int ret; - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + if (flags & I915_DISPATCH_PINNED) { + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } else { + u32 cs_offset = ring->scratch.gtt_offset; + + if (len > I830_BATCH_LIMIT) + return -ENOSPC; + + ret = intel_ring_begin(ring, 9+3); + if (ret) + return ret; + /* Blit the batch (which has now all relocs applied) to the stable batch + * scratch bo area (so that the CS never stumbles over its tlb + * invalidation bug) ... */ + intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | + XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 4096); + intel_ring_emit(ring, offset); + intel_ring_emit(ring, MI_FLUSH); + + /* ... and execute it. */ + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, cs_offset + len - 8); + intel_ring_advance(ring); + } return 0; } static int i915_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len) + u32 offset, u32 len, + unsigned flags) { int ret; @@ -937,7 +1145,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, return ret; intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); intel_ring_advance(ring); return 0; @@ -951,7 +1159,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring) if (obj == NULL) return; - kunmap(obj->pages[0]); + kunmap(sg_page(obj->pages->sgl)); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; @@ -972,13 +1180,13 @@ static int init_status_page(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true); + ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); if (ret != 0) { goto err_unref; } - ring->status_page.gfx_addr = obj->gtt_offset; - ring->status_page.page_addr = kmap(obj->pages[0]); + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); + ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); if (ring->status_page.page_addr == NULL) { ret = -ENOMEM; goto err_unpin; @@ -986,7 +1194,6 @@ static int init_status_page(struct intel_ring_buffer *ring) ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); - intel_ring_setup_status_page(ring); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); @@ -1000,6 +1207,23 @@ err: return ret; } +static int init_phys_status_page(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (!dev_priv->status_page_dmah) { + dev_priv->status_page_dmah = + drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); + if (!dev_priv->status_page_dmah) + return -ENOMEM; + } + + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + + return 0; +} + static int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -1010,8 +1234,8 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); - INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = 32 * PAGE_SIZE; + memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno)); init_waitqueue_head(&ring->irq_queue); @@ -1019,9 +1243,18 @@ static int intel_init_ring_buffer(struct drm_device *dev, ret = init_status_page(ring); if (ret) return ret; + } else { + BUG_ON(ring->id != RCS); + ret = init_phys_status_page(ring); + if (ret) + return ret; } - obj = i915_gem_alloc_object(dev, ring->size); + obj = NULL; + if (!HAS_LLC(dev)) + obj = i915_gem_object_create_stolen(dev, ring->size); + if (obj == NULL) + obj = i915_gem_alloc_object(dev, ring->size); if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); ret = -ENOMEM; @@ -1030,7 +1263,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->obj = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true); + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); if (ret) goto err_unref; @@ -1039,7 +1272,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto err_unpin; ring->virtual_start = - ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), ring->size); if (ring->virtual_start == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); @@ -1083,7 +1316,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) /* Disable the ring buffer. 
The ring must be idle at this point */ dev_priv = ring->dev->dev_private; - ret = intel_wait_ring_idle(ring); + ret = intel_ring_idle(ring); if (ret) DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", ring->name, ret); @@ -1102,28 +1335,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) cleanup_status_page(ring); } -static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) -{ - uint32_t __iomem *virt; - int rem = ring->size - ring->tail; - - if (ring->space < rem) { - int ret = intel_wait_ring_buffer(ring, rem); - if (ret) - return ret; - } - - virt = ring->virtual_start + ring->tail; - rem /= 4; - while (rem--) - iowrite32(MI_NOOP, virt++); - - ring->tail = 0; - ring->space = ring_space(ring); - - return 0; -} - static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) { int ret; @@ -1157,7 +1368,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) if (request->tail == -1) continue; - space = request->tail - (ring->tail + 8); + space = request->tail - (ring->tail + I915_RING_FREE_SPACE); if (space < 0) space += ring->size; if (space >= n) { @@ -1192,7 +1403,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) return 0; } -int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) +static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1227,7 +1438,8 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) msleep(1); - ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); + ret = i915_gem_check_wedge(&dev_priv->gpu_error, + dev_priv->mm.interruptible); if (ret) return ret; } while (!time_after(jiffies, end)); @@ -1235,39 +1447,123 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) return -EBUSY; } -int intel_ring_begin(struct intel_ring_buffer *ring, - int num_dwords) +static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; - int n = 4*num_dwords; + uint32_t __iomem *virt; + int rem = ring->size - ring->tail; + + if (ring->space < rem) { + int ret = ring_wait_for_space(ring, rem); + if (ret) + return ret; + } + + virt = ring->virtual_start + ring->tail; + rem /= 4; + while (rem--) + iowrite32(MI_NOOP, virt++); + + ring->tail = 0; + ring->space = ring_space(ring); + + return 0; +} + +int intel_ring_idle(struct intel_ring_buffer *ring) +{ + u32 seqno; int ret; - ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); - if (ret) - return ret; + /* We need to add any requests required to flush the objects and ring */ + if (ring->outstanding_lazy_request) { + ret = i915_add_request(ring, NULL); + if (ret) + return ret; + } - if (unlikely(ring->tail + n > ring->effective_size)) { + /* Wait upon the last request to be completed */ + if (list_empty(&ring->request_list)) + return 0; + + seqno = list_entry(ring->request_list.prev, + struct drm_i915_gem_request, + list)->seqno; + + return i915_wait_seqno(ring, seqno); +} + +static int +intel_ring_alloc_seqno(struct intel_ring_buffer *ring) +{ + if (ring->outstanding_lazy_request) + return 0; + + return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); +} + +static int __intel_ring_begin(struct intel_ring_buffer *ring, + int bytes) +{ + int ret; + + if (unlikely(ring->tail + bytes > ring->effective_size)) { ret = intel_wrap_ring_buffer(ring); if (unlikely(ret)) return ret; } - if (unlikely(ring->space 
< n)) { - ret = intel_wait_ring_buffer(ring, n); + if (unlikely(ring->space < bytes)) { + ret = ring_wait_for_space(ring, bytes); if (unlikely(ret)) return ret; } - ring->space -= n; + ring->space -= bytes; return 0; } +int intel_ring_begin(struct intel_ring_buffer *ring, + int num_dwords) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + int ret; + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, + dev_priv->mm.interruptible); + if (ret) + return ret; + + /* Preallocate the olr before touching the ring */ + ret = intel_ring_alloc_seqno(ring); + if (ret) + return ret; + + return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); +} + +void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + BUG_ON(ring->outstanding_lazy_request); + + if (INTEL_INFO(ring->dev)->gen >= 6) { + I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); + I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); + if (HAS_VEBOX(ring->dev)) + I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); + } + + ring->set_seqno(ring, seqno); + ring->hangcheck.seqno = seqno; +} + void intel_ring_advance(struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; ring->tail &= ring->size - 1; - if (dev_priv->stop_rings & intel_ring_flag(ring)) + if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) return; ring->write_tail(ring, ring->tail); } @@ -1306,8 +1602,8 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); } -static int gen6_ring_flush(struct intel_ring_buffer *ring, - u32 invalidate, u32 flush) +static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate, u32 flush) { uint32_t cmd; int ret; @@ -1317,10 +1613,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; + /* + * Bspec vol 1c.5 - video engine command streamer: + * "If ENABLED, all TLBs will be invalidated once the flush + * operation is complete. This bit is only valid when the + * Post-Sync Operation field is a value of 1h or 3h." + */ if (invalidate & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | + MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; intel_ring_emit(ring, cmd); - intel_ring_emit(ring, 0); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); @@ -1328,8 +1631,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, } static int +hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | + (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); + /* bit0-7 is the length on GEN6+ */ + intel_ring_emit(ring, offset); + intel_ring_advance(ring); + + return 0; +} + +static int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len) + u32 offset, u32 len, + unsigned flags) { int ret; @@ -1337,7 +1662,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, if (ret) return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | + (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_I965)); /* bit0-7 is the length on GEN6+ */ intel_ring_emit(ring, offset); intel_ring_advance(ring); @@ -1347,9 +1674,10 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, /* Blitter support (SandyBridge+) */ -static int blt_ring_flush(struct intel_ring_buffer *ring, - u32 invalidate, u32 flush) +static int gen6_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate, u32 flush) { + struct drm_device *dev = ring->dev; uint32_t cmd; int ret; @@ -1358,13 +1686,24 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; + /* + * Bspec vol 1c.3 - blitter engine command streamer: + * "If ENABLED, all TLBs will be invalidated once the flush + * operation is complete. This bit is only valid when the + * Post-Sync Operation field is a value of 1h or 3h." + */ if (invalidate & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB; + cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | + MI_FLUSH_DW_OP_STOREDW; intel_ring_emit(ring, cmd); - intel_ring_emit(ring, 0); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); + + if (IS_GEN7(dev) && flush) + return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); + return 0; } @@ -1379,24 +1718,32 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; - ring->flush = gen6_render_ring_flush; + ring->flush = gen7_render_ring_flush; + if (INTEL_INFO(dev)->gen == 6) + ring->flush = gen6_render_ring_flush; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; - ring->irq_enable_mask = GT_USER_INTERRUPT; + ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; ring->sync_to = gen6_ring_sync; - ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; - ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; - ring->signal_mbox[0] = GEN6_VRSYNC; - ring->signal_mbox[1] = GEN6_BRSYNC; + ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB; + ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE; + ring->signal_mbox[RCS] = GEN6_NOSYNC; + ring->signal_mbox[VCS] = GEN6_VRSYNC; + ring->signal_mbox[BCS] = GEN6_BRSYNC; + ring->signal_mbox[VECS] = GEN6_VERSYNC; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->flush = gen4_render_ring_flush; ring->get_seqno = pc_render_get_seqno; + ring->set_seqno = pc_render_set_seqno; ring->irq_get = gen5_ring_get_irq; ring->irq_put = gen5_ring_put_irq; - ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; + ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT | + GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { ring->add_request = i9xx_add_request; if (INTEL_INFO(dev)->gen < 4) @@ -1404,6 +1751,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) else ring->flush = gen4_render_ring_flush; ring->get_seqno = ring_get_seqno; + ring->set_seqno = ring_set_seqno; if (IS_GEN2(dev)) { ring->irq_get = i8xx_ring_get_irq; ring->irq_put = i8xx_ring_put_irq; @@ -1414,7 +1762,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 6) + if (IS_HASWELL(dev)) + ring->dispatch_execbuffer = 
hsw_ring_dispatch_execbuffer; + else if (INTEL_INFO(dev)->gen >= 6) ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 4) ring->dispatch_execbuffer = i965_dispatch_execbuffer; @@ -1425,10 +1775,26 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; + /* Workaround batchbuffer to combat CS tlb bug. */ + if (HAS_BROKEN_CS_TLB(dev)) { + struct drm_i915_gem_object *obj; + int ret; - if (!I915_NEED_GFX_HWS(dev)) { - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(ring->status_page.page_addr, 0, PAGE_SIZE); + obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); + if (obj == NULL) { + DRM_ERROR("Failed to allocate batch bo\n"); + return -ENOMEM; + } + + ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to ping batch bo\n"); + return ret; + } + + ring->scratch.obj = obj; + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); } return intel_init_ring_buffer(dev, ring); @@ -1438,6 +1804,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; ring->name = "render ring"; ring->id = RCS; @@ -1457,6 +1824,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) else ring->flush = gen4_render_ring_flush; ring->get_seqno = ring_get_seqno; + ring->set_seqno = ring_set_seqno; if (IS_GEN2(dev)) { ring->irq_get = i8xx_ring_get_irq; ring->irq_put = i8xx_ring_put_irq; @@ -1475,17 +1843,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; - if (!I915_NEED_GFX_HWS(dev)) - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); - INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = size; ring->effective_size = ring->size; - if (IS_I830(ring->dev)) + if (IS_I830(ring->dev) || IS_845G(ring->dev)) ring->effective_size -= 128; ring->virtual_start = ioremap_wc(start, size); @@ -1495,6 +1859,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) return -ENOMEM; } + if (!I915_NEED_GFX_HWS(dev)) { + ret = init_phys_status_page(ring); + if (ret) + return ret; + } + return 0; } @@ -1512,26 +1882,31 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) /* gen6 bsd needs a special wa for tail updates */ if (IS_GEN6(dev)) ring->write_tail = gen6_bsd_ring_write_tail; - ring->flush = gen6_ring_flush; + ring->flush = gen6_bsd_ring_flush; ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; - ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; + ring->set_seqno = ring_set_seqno; + ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; ring->sync_to = gen6_ring_sync; - ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; - ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; - ring->signal_mbox[0] = GEN6_RVSYNC; - ring->signal_mbox[1] = GEN6_BVSYNC; + ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; + ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB; + 
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE; + ring->signal_mbox[RCS] = GEN6_RVSYNC; + ring->signal_mbox[VCS] = GEN6_NOSYNC; + ring->signal_mbox[BCS] = GEN6_BVSYNC; + ring->signal_mbox[VECS] = GEN6_VEVSYNC; } else { ring->mmio_base = BSD_RING_BASE; ring->flush = bsd_ring_flush; ring->add_request = i9xx_add_request; ring->get_seqno = ring_get_seqno; + ring->set_seqno = ring_set_seqno; if (IS_GEN5(dev)) { - ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; + ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT; ring->irq_get = gen5_ring_get_irq; ring->irq_put = gen5_ring_put_irq; } else { @@ -1543,7 +1918,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) } ring->init = init_ring_common; - return intel_init_ring_buffer(dev, ring); } @@ -1557,20 +1931,94 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->mmio_base = BLT_RING_BASE; ring->write_tail = ring_write_tail; - ring->flush = blt_ring_flush; + ring->flush = gen6_ring_flush; ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; - ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; + ring->set_seqno = ring_set_seqno; + ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; ring->sync_to = gen6_ring_sync; - ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; - ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; - ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; - ring->signal_mbox[0] = GEN6_RBSYNC; - ring->signal_mbox[1] = GEN6_VBSYNC; + ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; + ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; + ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE; + ring->signal_mbox[RCS] = GEN6_RBSYNC; + ring->signal_mbox[VCS] = GEN6_VBSYNC; + ring->signal_mbox[BCS] = GEN6_NOSYNC; + ring->signal_mbox[VECS] = GEN6_VEBSYNC; + ring->init = init_ring_common; + + return intel_init_ring_buffer(dev, ring); +} + +int intel_init_vebox_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; + + ring->name = "video enhancement ring"; + ring->id = VECS; + + ring->mmio_base = VEBOX_RING_BASE; + ring->write_tail = ring_write_tail; + ring->flush = gen6_ring_flush; + ring->add_request = gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; + ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; + ring->irq_get = hsw_vebox_get_irq; + ring->irq_put = hsw_vebox_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; + ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; + ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB; + ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID; + ring->signal_mbox[RCS] = GEN6_RVESYNC; + ring->signal_mbox[VCS] = GEN6_VVESYNC; + ring->signal_mbox[BCS] = GEN6_BVESYNC; + ring->signal_mbox[VECS] = GEN6_NOSYNC; ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); } + +int +intel_ring_flush_all_caches(struct intel_ring_buffer *ring) +{ + int ret; + + if (!ring->gpu_caches_dirty) + return 0; + + ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); + + ring->gpu_caches_dirty = false; + return 0; +} + +int 
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
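Usage note: the two helpers added at the end of the file centralise the gpu_caches_dirty bookkeeping, i.e. invalidate stale GPU caches before a batch runs and flush render output lazily once something has been rendered. The sketch below shows how a caller might pair them; the ring struct, domain constant and function names are simplified stand-ins for illustration only, not the driver's real types or submit path.

    #include <stdbool.h>
    #include <stdio.h>

    #define EXAMPLE_GPU_DOMAINS 0x3e  /* stand-in for I915_GEM_GPU_DOMAINS */

    struct example_ring {
            bool gpu_caches_dirty;
    };

    /* Invalidate before a batch; also flush if earlier work left caches dirty. */
    static int example_invalidate_all_caches(struct example_ring *ring)
    {
            unsigned int flush = ring->gpu_caches_dirty ? EXAMPLE_GPU_DOMAINS : 0;

            printf("flush(invalidate=%#x, flush=%#x)\n", EXAMPLE_GPU_DOMAINS, flush);
            ring->gpu_caches_dirty = false;
            return 0;
    }

    /* Flush only if something has been rendered since the last flush. */
    static int example_flush_all_caches(struct example_ring *ring)
    {
            if (!ring->gpu_caches_dirty)
                    return 0;

            printf("flush(invalidate=0, flush=%#x)\n", EXAMPLE_GPU_DOMAINS);
            ring->gpu_caches_dirty = false;
            return 0;
    }

    int main(void)
    {
            struct example_ring ring = { .gpu_caches_dirty = false };

            example_invalidate_all_caches(&ring);  /* before dispatching a batch */
            ring.gpu_caches_dirty = true;          /* the batch wrote via GPU caches */
            example_flush_all_caches(&ring);       /* before the results are consumed */
            return 0;
    }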