author    | Chris Wilson <chris@chris-wilson.co.uk> | 2016-04-09 10:57:54 +0100
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2016-04-09 12:09:05 +0100
commit    | c04e0f3b4ea01b3b1d81ccfce0a73bb0b297ba46 (patch)
tree      | e2123092223d8c5365624d207021eb42f401863f /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    | 9b9ed3093613288247a27a55a6dd07f1222150f1 (diff)
drm/i915: Separate out the seqno-barrier from engine->get_seqno
In order to simplify future patches, extract the
lazy_coherency optimisation out of the engine->get_seqno() vfunc into
its own callback.
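The resulting split looks roughly like this (a sketch only: the vfunc
declarations live in intel_ringbuffer.h, which is outside the diff shown
below, so the struct layout here is abbreviated):

```c
/* Sketch of the two callbacks after the split; only the fields touched by
 * this patch are shown, the rest of intel_engine_cs is elided. */
struct intel_engine_cs {
	/* ... */

	/* Optional coherency barrier, applied after the user interrupt and
	 * before reading the seqno (e.g. gen6_seqno_barrier below). */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);

	/* Cheap read of the current seqno from the status page. */
	u32 (*get_seqno)(struct intel_engine_cs *engine);

	/* ... */
};
```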
v2: Rename the barrier to engine->irq_seqno_barrier to try and better
reflect that the barrier is only required after the user interrupt before
reading the seqno (to ensure that the seqno update lands in time as we
do not have strict seqno-irq ordering on all platforms).
Reviewed-by: Dave Gordon <david.s.gordon@intel.com> [#v2]
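For illustration only (this helper is not part of the patch;
i915_seqno_passed() is the existing seqno comparison helper from
i915_drv.h): a waiter applies the barrier once, after the interrupt has
fired, and then does the plain status-page read:

```c
/* Hypothetical wait-side usage: flush the seqno write after the user
 * interrupt, then sample it cheaply from the status page. */
static bool seqno_signaled_after_irq(struct intel_engine_cs *engine, u32 target)
{
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	return i915_seqno_passed(engine->get_seqno(engine), target);
}
```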
v3: Comments for hangcheck paranoia. Mika wanted to keep the extra
barrier inside the hangcheck, just in case. I can argue that it doesn't
provide a barrier against anything, but the side-effects of applying the
barrier may prevent a false declaration of a hung GPU.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1460195877-20520-2-git-send-email-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 34
1 file changed, 18 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 69cc3bc20495..e144f4f301bf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1568,8 +1568,8 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static u32
-gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+static void
+gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
@@ -1583,16 +1583,12 @@ gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 	 * batch i.e. much more frequent than a delay when waiting for the
 	 * interrupt (with the same net latency).
 	 */
-	if (!lazy_coherency) {
-		struct drm_i915_private *dev_priv = engine->dev->dev_private;
-		POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
-	}
-
-	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
 }
 
 static u32
-ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *engine)
 {
 	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
@@ -1604,7 +1600,7 @@ ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static u32
-pc_render_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *engine)
 {
 	return engine->scratch.cpu_page[0];
 }
@@ -2828,7 +2824,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->irq_get = gen8_ring_get_irq;
 		engine->irq_put = gen8_ring_put_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-		engine->get_seqno = gen6_ring_get_seqno;
+		engine->irq_seqno_barrier = gen6_seqno_barrier;
+		engine->get_seqno = ring_get_seqno;
 		engine->set_seqno = ring_set_seqno;
 		if (i915_semaphore_is_enabled(dev)) {
 			WARN_ON(!dev_priv->semaphore_obj);
@@ -2845,7 +2842,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->irq_get = gen6_ring_get_irq;
 		engine->irq_put = gen6_ring_put_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-		engine->get_seqno = gen6_ring_get_seqno;
+		engine->irq_seqno_barrier = gen6_seqno_barrier;
+		engine->get_seqno = ring_get_seqno;
 		engine->set_seqno = ring_set_seqno;
 		if (i915_semaphore_is_enabled(dev)) {
 			engine->semaphore.sync_to = gen6_ring_sync;
@@ -2960,7 +2958,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		engine->write_tail = gen6_bsd_ring_write_tail;
 		engine->flush = gen6_bsd_ring_flush;
 		engine->add_request = gen6_add_request;
-		engine->get_seqno = gen6_ring_get_seqno;
+		engine->irq_seqno_barrier = gen6_seqno_barrier;
+		engine->get_seqno = ring_get_seqno;
 		engine->set_seqno = ring_set_seqno;
 		if (INTEL_INFO(dev)->gen >= 8) {
 			engine->irq_enable_mask =
@@ -3033,7 +3032,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	engine->mmio_base = GEN8_BSD2_RING_BASE;
 	engine->flush = gen6_bsd_ring_flush;
 	engine->add_request = gen6_add_request;
-	engine->get_seqno = gen6_ring_get_seqno;
+	engine->irq_seqno_barrier = gen6_seqno_barrier;
+	engine->get_seqno = ring_get_seqno;
 	engine->set_seqno = ring_set_seqno;
 	engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
@@ -3064,7 +3064,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	engine->write_tail = ring_write_tail;
 	engine->flush = gen6_ring_flush;
 	engine->add_request = gen6_add_request;
-	engine->get_seqno = gen6_ring_get_seqno;
+	engine->irq_seqno_barrier = gen6_seqno_barrier;
+	engine->get_seqno = ring_get_seqno;
 	engine->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
 		engine->irq_enable_mask =
@@ -3122,7 +3123,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	engine->write_tail = ring_write_tail;
 	engine->flush = gen6_ring_flush;
 	engine->add_request = gen6_add_request;
-	engine->get_seqno = gen6_ring_get_seqno;
+	engine->irq_seqno_barrier = gen6_seqno_barrier;
+	engine->get_seqno = ring_get_seqno;
 	engine->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {