author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-02-10 11:35:36 -0800
---|---|---
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-02-10 11:35:36 -0800
commit | 4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree | a20c125b27740ec7b4c761b11d801108e1b316b2 /drivers/gpu/drm/i915/i915_irq.c
parent | 47c1ffb2b6b630894e9a16442611c057ab21c057 (diff)
parent | 98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 1256
1 file changed, 495 insertions, 761 deletions
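The first hunks below flip the programming order in the GEN8_IRQ_INIT_NDX and GEN5_IRQ_INIT macros: the enable register (IER) is now written while the sources are still masked, the mask register (IMR) is written afterwards, and the posting read moves from IER to IMR so it is the unmask that gets flushed. A minimal sketch of that ordering, assuming the driver's I915_WRITE/POSTING_READ helpers; the function form below is illustrative only, the patch keeps this logic in macros:

```c
/*
 * Illustrative only: the patch implements this as the GEN5_IRQ_INIT /
 * GEN8_IRQ_INIT_NDX macros; this helper just shows the write ordering.
 */
static void gen5_irq_init_sketch(struct drm_i915_private *dev_priv,
				 u32 imr_reg, u32 ier_reg,
				 u32 imr_val, u32 ier_val)
{
	I915_WRITE(ier_reg, ier_val);	/* enable sources while still masked */
	I915_WRITE(imr_reg, imr_val);	/* unmask last */
	POSTING_READ(imr_reg);		/* flush the unmask, not the enable */
}
```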
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0050ee9470f1..d0d3dfbe6d2a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -37,6 +37,14 @@ #include "i915_trace.h" #include "intel_drv.h" +/** + * DOC: interrupt handling + * + * These functions provide the basic support for enabling and disabling the + * interrupt handling support. There's a lot more functionality in i915_irq.c + * and related files, but that will be described in separate chapters. + */ + static const u32 hpd_ibx[] = { [HPD_CRT] = SDE_CRT_HOTPLUG, [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, @@ -118,20 +126,22 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ - I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ - POSTING_READ(GEN8_##type##_IER(which)); \ + I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ + POSTING_READ(GEN8_##type##_IMR(which)); \ } while (0) #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ - I915_WRITE(type##IMR, (imr_val)); \ I915_WRITE(type##IER, (ier_val)); \ - POSTING_READ(type##IER); \ + I915_WRITE(type##IMR, (imr_val)); \ + POSTING_READ(type##IMR); \ } while (0) +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); + /* For display hotplug interrupt */ -static void +void ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); @@ -146,12 +156,12 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) } } -static void +void ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (!intel_irqs_enabled(dev_priv)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; if ((dev_priv->irq_mask & mask) != mask) { @@ -192,71 +202,28 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) ilk_update_gt_irq(dev_priv, mask, 0); } -/** - * snb_update_pm_irq - update GEN6_PMIMR - * @dev_priv: driver private - * @interrupt_mask: mask of interrupt bits to update - * @enabled_irq_mask: mask of interrupt bits to enable - */ -static void snb_update_pm_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) -{ - uint32_t new_val; - - assert_spin_locked(&dev_priv->irq_lock); - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return; - - new_val = dev_priv->pm_irq_mask; - new_val &= ~interrupt_mask; - new_val |= (~enabled_irq_mask & interrupt_mask); - - if (new_val != dev_priv->pm_irq_mask) { - dev_priv->pm_irq_mask = new_val; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); - POSTING_READ(GEN6_PMIMR); - } -} - -void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) { - snb_update_pm_irq(dev_priv, mask, mask); + return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; } -void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) { - snb_update_pm_irq(dev_priv, mask, 0); + return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IMR(2) : GEN6_PMIMR; } -static bool ivb_can_enable_err_int(struct drm_device *dev) +static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc; - enum pipe pipe; - - assert_spin_locked(&dev_priv->irq_lock); - - for_each_pipe(pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (crtc->cpu_fifo_underrun_disabled) - return false; - } - - return true; + return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; } /** - * bdw_update_pm_irq - update GT interrupt 2 + * snb_update_pm_irq - update GEN6_PMIMR * @dev_priv: driver private * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable - * - * Copied from the snb function, updated with relevant register offsets */ -static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, +static void snb_update_pm_irq(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, uint32_t enabled_irq_mask) { @@ -264,144 +231,91 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return; - new_val = dev_priv->pm_irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != dev_priv->pm_irq_mask) { dev_priv->pm_irq_mask = new_val; - I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask); - POSTING_READ(GEN8_GT_IMR(2)); + I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); + POSTING_READ(gen6_pm_imr(dev_priv)); } } -void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { - bdw_update_pm_irq(dev_priv, mask, mask); + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + snb_update_pm_irq(dev_priv, mask, mask); } -void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, + uint32_t mask) { - bdw_update_pm_irq(dev_priv, mask, 0); + snb_update_pm_irq(dev_priv, mask, 0); } -static bool cpt_can_enable_serr_int(struct drm_device *dev) +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - struct intel_crtc *crtc; - - assert_spin_locked(&dev_priv->irq_lock); - - for_each_pipe(pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (crtc->pch_fifo_underrun_disabled) - return false; - } + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; - return true; + __gen6_disable_pm_irq(dev_priv, mask); } -void i9xx_check_fifo_underruns(struct drm_device *dev) +void gen6_reset_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); + uint32_t reg = gen6_pm_iir(dev_priv); - for_each_intel_crtc(dev, crtc) { - u32 reg = PIPESTAT(crtc->pipe); - u32 pipestat; - - if (crtc->cpu_fifo_underrun_disabled) - continue; - - pipestat = I915_READ(reg) & 0xffff0000; - if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) - continue; - - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); - - DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); - } - - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + spin_lock_irq(&dev_priv->irq_lock); + I915_WRITE(reg, dev_priv->pm_rps_events); + I915_WRITE(reg, 
dev_priv->pm_rps_events); + POSTING_READ(reg); + spin_unlock_irq(&dev_priv->irq_lock); } -static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, - bool enable, bool old) +void gen6_enable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0xffff0000; - - assert_spin_locked(&dev_priv->irq_lock); - if (enable) { - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); - } else { - if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); - } -} + spin_lock_irq(&dev_priv->irq_lock); -static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : - DE_PIPEB_FIFO_UNDERRUN; + WARN_ON(dev_priv->rps.pm_iir); + WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); + dev_priv->rps.interrupts_enabled = true; + I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | + dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - if (enable) - ironlake_enable_display_irq(dev_priv, bit); - else - ironlake_disable_display_irq(dev_priv, bit); + spin_unlock_irq(&dev_priv->irq_lock); } -static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, - bool enable, bool old) +void gen6_disable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (enable) { - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); - if (!ivb_can_enable_err_int(dev)) - return; + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->rps.interrupts_enabled = false; + spin_unlock_irq(&dev_priv->irq_lock); - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); - } else { - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + cancel_work_sync(&dev_priv->rps.work); - if (old && - I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { - DRM_ERROR("uncleared fifo underrun on pipe %c\n", - pipe_name(pipe)); - } - } -} + spin_lock_irq(&dev_priv->irq_lock); -static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? 
+ ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); - assert_spin_locked(&dev_priv->irq_lock); + __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & + ~dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); - if (enable) - dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; - else - dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + dev_priv->rps.pm_iir = 0; + + spin_unlock_irq(&dev_priv->irq_lock); } /** @@ -410,9 +324,9 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) +void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) { uint32_t sdeimr = I915_READ(SDEIMR); sdeimr &= ~interrupt_mask; @@ -426,160 +340,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, I915_WRITE(SDEIMR, sdeimr); POSTING_READ(SDEIMR); } -#define ibx_enable_display_interrupt(dev_priv, bits) \ - ibx_display_interrupt_update((dev_priv), (bits), (bits)) -#define ibx_disable_display_interrupt(dev_priv, bits) \ - ibx_display_interrupt_update((dev_priv), (bits), 0) - -static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, - bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t bit = (pch_transcoder == TRANSCODER_A) ? - SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; - - if (enable) - ibx_enable_display_interrupt(dev_priv, bit); - else - ibx_disable_display_interrupt(dev_priv, bit); -} - -static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, - bool enable, bool old) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (enable) { - I915_WRITE(SERR_INT, - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); - - if (!cpt_can_enable_serr_int(dev)) - return; - - ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); - } else { - ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); - - if (old && I915_READ(SERR_INT) & - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", - transcoder_name(pch_transcoder)); - } - } -} - -/** - * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages - * @dev: drm device - * @pipe: pipe - * @enable: true if we want to report FIFO underrun errors, false otherwise - * - * This function makes us disable or enable CPU fifo underruns for a specific - * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun - * reporting for one pipe may also disable all the other CPU error interruts for - * the other pipes, due to the fact that there's just one interrupt mask/enable - * bit for all the pipes. - * - * Returns the previous state of underrun reporting. 
- */ -static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - bool old; - - assert_spin_locked(&dev_priv->irq_lock); - - old = !intel_crtc->cpu_fifo_underrun_disabled; - intel_crtc->cpu_fifo_underrun_disabled = !enable; - - if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) - i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); - else if (IS_GEN5(dev) || IS_GEN6(dev)) - ironlake_set_fifo_underrun_reporting(dev, pipe, enable); - else if (IS_GEN7(dev)) - ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); - else if (IS_GEN8(dev)) - broadwell_set_fifo_underrun_reporting(dev, pipe, enable); - - return old; -} - -bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - bool ret; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return ret; -} - -static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, - enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - return !intel_crtc->cpu_fifo_underrun_disabled; -} - -/** - * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages - * @dev: drm device - * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) - * @enable: true if we want to report FIFO underrun errors, false otherwise - * - * This function makes us disable or enable PCH fifo underruns for a specific - * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO - * underrun reporting for one transcoder may also disable all the other PCH - * error interruts for the other transcoders, due to the fact that there's just - * one interrupt mask/enable bit for all the transcoders. - * - * Returns the previous state of underrun reporting. - */ -bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, - enum transcoder pch_transcoder, - bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; - bool old; - - /* - * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT - * has only one pch transcoder A that all pipes can use. To avoid racy - * pch transcoder -> pipe lookups from interrupt code simply store the - * underrun statistics in crtc A. Since we never expose this anywhere - * nor use it outside of the fifo underrun code here using the "wrong" - * crtc on LPT won't cause issues. 
- */ - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - - old = !intel_crtc->pch_fifo_underrun_disabled; - intel_crtc->pch_fifo_underrun_disabled = !enable; - - if (HAS_PCH_IBX(dev)) - ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); - else - cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old); - - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - return old; -} - static void __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, @@ -589,6 +349,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || status_mask & ~PIPESTAT_INT_STATUS_MASK, @@ -615,6 +376,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || status_mask & ~PIPESTAT_INT_STATUS_MASK, @@ -694,19 +456,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, static void i915_enable_asle_pipestat(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) return; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } /** @@ -1020,7 +781,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, /* In vblank? 
*/ if (in_vbl) - ret |= DRM_SCANOUTPOS_INVBL; + ret |= DRM_SCANOUTPOS_IN_VBLANK; return ret; } @@ -1094,18 +855,17 @@ static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, dig_port_work); - unsigned long irqflags; u32 long_port_mask, short_port_mask; struct intel_digital_port *intel_dig_port; int i, ret; u32 old_bits = 0; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); long_port_mask = dev_priv->long_hpd_port_mask; dev_priv->long_hpd_port_mask = 0; short_port_mask = dev_priv->short_hpd_port_mask; dev_priv->short_hpd_port_mask = 0; - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); for (i = 0; i < I915_MAX_PORTS; i++) { bool valid = false; @@ -1130,9 +890,9 @@ static void i915_digport_work_func(struct work_struct *work) } if (old_bits) { - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); dev_priv->hpd_event_bits |= old_bits; - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); schedule_work(&dev_priv->hotplug_work); } } @@ -1151,7 +911,6 @@ static void i915_hotplug_work_func(struct work_struct *work) struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; struct drm_connector *connector; - unsigned long irqflags; bool hpd_disabled = false; bool changed = false; u32 hpd_event_bits; @@ -1159,7 +918,7 @@ static void i915_hotplug_work_func(struct work_struct *work) mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); hpd_event_bits = dev_priv->hpd_event_bits; dev_priv->hpd_event_bits = 0; @@ -1193,7 +952,7 @@ static void i915_hotplug_work_func(struct work_struct *work) msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); @@ -1260,11 +1019,7 @@ static void notify_ring(struct drm_device *dev, trace_i915_gem_request_complete(ring); - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_notify_mmio_flip(ring); - wake_up_all(&ring->irq_queue); - i915_queue_hangcheck(dev); } static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, @@ -1322,10 +1077,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, * @dev_priv: DRM device private * */ -static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) { u32 residency_C0_up = 0, residency_C0_down = 0; - u8 new_delay, adj; + int new_delay, adj; dev_priv->rps.ei_interrupt_count++; @@ -1400,14 +1155,15 @@ static void gen6_pm_rps_work(struct work_struct *work) int new_delay, adj; spin_lock_irq(&dev_priv->irq_lock); + /* Speed up work cancelation during disabling rps interrupts. 
*/ + if (!dev_priv->rps.interrupts_enabled) { + spin_unlock_irq(&dev_priv->irq_lock); + return; + } pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - if (INTEL_INFO(dev_priv->dev)->gen >= 8) - gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - else { - /* Make sure not to corrupt PMIMR state used by ringbuffer */ - gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); - } + /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); /* Make sure we didn't queue anything we're not going to process. */ @@ -1488,7 +1244,6 @@ static void ivybridge_parity_work(struct work_struct *work) u32 error_status, row, bank, subbank; char *parity_event[6]; uint32_t misccpctl; - unsigned long flags; uint8_t slice = 0; /* We must turn off DOP level clock gating to access the L3 registers. @@ -1547,9 +1302,9 @@ static void ivybridge_parity_work(struct work_struct *work) out: WARN_ON(dev_priv->l3_parity.which_slice); - spin_lock_irqsave(&dev_priv->irq_lock, flags); + spin_lock_irq(&dev_priv->irq_lock); gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + spin_unlock_irq(&dev_priv->irq_lock); mutex_unlock(&dev_priv->dev->struct_mutex); } @@ -1601,32 +1356,18 @@ static void snb_gt_irq_handler(struct drm_device *dev, if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | - GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { - i915_handle_error(dev, false, "GT error interrupt 0x%08x", - gt_iir); - } + GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) + DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); if (gt_iir & GT_PARITY_ERROR(dev)) ivybridge_parity_error_irq_handler(dev, gt_iir); } -static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) -{ - if ((pm_iir & dev_priv->pm_rps_events) == 0) - return; - - spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); - spin_unlock(&dev_priv->irq_lock); - - queue_work(dev_priv->wq, &dev_priv->rps.work); -} - static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 master_ctl) { + struct intel_engine_cs *ring; u32 rcs, bcs, vcs; uint32_t tmp = 0; irqreturn_t ret = IRQ_NONE; @@ -1636,12 +1377,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (tmp) { I915_WRITE(GEN8_GT_IIR(0), tmp); ret = IRQ_HANDLED; + rcs = tmp >> GEN8_RCS_IRQ_SHIFT; - bcs = tmp >> GEN8_BCS_IRQ_SHIFT; + ring = &dev_priv->ring[RCS]; if (rcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[RCS]); + notify_ring(dev, ring); + if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); + + bcs = tmp >> GEN8_BCS_IRQ_SHIFT; + ring = &dev_priv->ring[BCS]; if (bcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[BCS]); + notify_ring(dev, ring); + if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } @@ -1651,12 +1400,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (tmp) { I915_WRITE(GEN8_GT_IIR(1), tmp); ret = IRQ_HANDLED; + vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; + ring = &dev_priv->ring[VCS]; if (vcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); + notify_ring(dev, ring); + if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); + 
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; + ring = &dev_priv->ring[VCS2]; if (vcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS2]); + notify_ring(dev, ring); + if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } @@ -1667,7 +1424,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, I915_WRITE(GEN8_GT_IIR(2), tmp & dev_priv->pm_rps_events); ret = IRQ_HANDLED; - gen8_rps_irq_handler(dev_priv, tmp); + gen6_rps_irq_handler(dev_priv, tmp); } else DRM_ERROR("The master control interrupt lied (PM)!\n"); } @@ -1677,9 +1434,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (tmp) { I915_WRITE(GEN8_GT_IIR(3), tmp); ret = IRQ_HANDLED; + vcs = tmp >> GEN8_VECS_IRQ_SHIFT; + ring = &dev_priv->ring[VECS]; if (vcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VECS]); + notify_ring(dev, ring); + if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } @@ -1690,7 +1451,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 -static int ilk_port_to_hotplug_shift(enum port port) +static int pch_port_to_hotplug_shift(enum port port) { switch (port) { case PORT_A: @@ -1706,7 +1467,7 @@ static int ilk_port_to_hotplug_shift(enum port port) } } -static int g4x_port_to_hotplug_shift(enum port port) +static int i915_port_to_hotplug_shift(enum port port) { switch (port) { case PORT_A: @@ -1764,15 +1525,17 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, if (port && dev_priv->hpd_irq_port[port]) { bool long_hpd; - if (IS_G4X(dev)) { - dig_shift = g4x_port_to_hotplug_shift(port); - long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; - } else { - dig_shift = ilk_port_to_hotplug_shift(port); + if (HAS_PCH_SPLIT(dev)) { + dig_shift = pch_port_to_hotplug_shift(port); long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } else { + dig_shift = i915_port_to_hotplug_shift(port); + long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; } - DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", + port_name(port), + long_hpd ? "long" : "short"); /* for long HPD pulses we want to have the digital queue happen, but we still want HPD storm detection to function. */ if (long_hpd) { @@ -1875,7 +1638,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, if (!pipe_crc->entries) { spin_unlock(&pipe_crc->lock); - DRM_ERROR("spurious interrupt\n"); + DRM_DEBUG_KMS("spurious interrupt\n"); return; } @@ -1961,37 +1724,38 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) * the work queue. */ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { + /* TODO: RPS on GEN9+ is not supported yet. 
*/ + if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, + "GEN9+: unexpected RPS IRQ\n")) + return; + if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + if (dev_priv->rps.interrupts_enabled) { + dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; + queue_work(dev_priv->wq, &dev_priv->rps.work); + } spin_unlock(&dev_priv->irq_lock); - - queue_work(dev_priv->wq, &dev_priv->rps.work); } + if (INTEL_INFO(dev_priv)->gen >= 8) + return; + if (HAS_VEBOX(dev_priv->dev)) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { - i915_handle_error(dev_priv->dev, false, - "VEBOX CS error interrupt 0x%08x", - pm_iir); - } + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); } } static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) { - struct intel_crtc *crtc; - if (!drm_handle_vblank(dev, pipe)) return false; - crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); - wake_up(&crtc->vbl_wait); - return true; } @@ -2002,7 +1766,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) int pipe; spin_lock(&dev_priv->irq_lock); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg; u32 mask, iir_bit = 0; @@ -2013,9 +1777,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) * we need to be careful that we only handle what we want to * handle. */ - mask = 0; - if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) - mask |= PIPE_FIFO_UNDERRUN_STATUS; + + /* fifo underruns are filterered in the underrun handler. 
*/ + mask = PIPE_FIFO_UNDERRUN_STATUS; switch (pipe) { case PIPE_A: @@ -2047,9 +1811,10 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) } spin_unlock(&dev_priv->irq_lock); - for_each_pipe(pipe) { - if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_pipe_handle_vblank(dev, pipe); + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { intel_prepare_page_flip(dev, pipe); @@ -2059,9 +1824,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) @@ -2216,7 +1980,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) DRM_ERROR("PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); @@ -2228,14 +1992,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, - false)) - DRM_ERROR("PCH transcoder A FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); if (pch_iir & SDE_TRANSB_FIFO_UNDER) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, - false)) - DRM_ERROR("PCH transcoder B FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); } static void ivb_err_int_handler(struct drm_device *dev) @@ -2247,13 +2007,9 @@ static void ivb_err_int_handler(struct drm_device *dev) if (err_int & ERR_INT_POISON) DRM_ERROR("Poison interrupt\n"); - for_each_pipe(pipe) { - if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + for_each_pipe(dev_priv, pipe) { + if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { if (IS_IVYBRIDGE(dev)) @@ -2275,19 +2031,13 @@ static void cpt_serr_int_handler(struct drm_device *dev) DRM_ERROR("PCH poison interrupt\n"); if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, - false)) - DRM_ERROR("PCH transcoder A FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, - false)) - DRM_ERROR("PCH transcoder B FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, - false)) - DRM_ERROR("PCH transcoder C FIFO underrun\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); I915_WRITE(SERR_INT, serr_int); } @@ -2324,7 +2074,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 
DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); if (pch_iir & SDE_FDI_MASK_CPT) - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); @@ -2347,14 +2097,13 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) if (de_iir & DE_POISON) DRM_ERROR("Poison interrupt\n"); - for_each_pipe(pipe) { - if (de_iir & DE_PIPE_VBLANK(pipe)) - intel_pipe_handle_vblank(dev, pipe); + for_each_pipe(dev_priv, pipe) { + if (de_iir & DE_PIPE_VBLANK(pipe) && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) i9xx_pipe_crc_irq_handler(dev, pipe); @@ -2397,9 +2146,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) if (de_iir & DE_GSE_IVB) intel_opregion_asle_intr(dev); - for_each_pipe(pipe) { - if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) - intel_pipe_handle_vblank(dev, pipe); + for_each_pipe(dev_priv, pipe) { + if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); /* plane/pipes map 1:1 on ilk+ */ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { @@ -2503,6 +2253,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) irqreturn_t ret = IRQ_NONE; uint32_t tmp = 0; enum pipe pipe; + u32 aux_mask = GEN8_AUX_CHANNEL_A; + + if (IS_GEN9(dev)) + aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; master_ctl = I915_READ(GEN8_MASTER_IRQ); master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; @@ -2535,7 +2290,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (tmp) { I915_WRITE(GEN8_DE_PORT_IIR, tmp); ret = IRQ_HANDLED; - if (tmp & GEN8_AUX_CHANNEL_A) + + if (tmp & aux_mask) dp_aux_irq_handler(dev); else DRM_ERROR("Unexpected DE Port interrupt\n"); @@ -2544,8 +2300,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); } - for_each_pipe(pipe) { - uint32_t pipe_iir; + for_each_pipe(dev_priv, pipe) { + uint32_t pipe_iir, flip_done = 0, fault_errors = 0; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue; @@ -2554,10 +2310,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (pipe_iir) { ret = IRQ_HANDLED; I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); - if (pipe_iir & GEN8_PIPE_VBLANK) - intel_pipe_handle_vblank(dev, pipe); - if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { + if (pipe_iir & GEN8_PIPE_VBLANK && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); + + if (IS_GEN9(dev)) + flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; + else + flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; + + if (flip_done) { intel_prepare_page_flip(dev, pipe); intel_finish_page_flip_plane(dev, pipe); } @@ -2565,18 +2328,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev, pipe); - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); - if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { + + if (IS_GEN9(dev)) + fault_errors = pipe_iir & 
GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + + if (fault_errors) DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", pipe_name(pipe), pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } } else DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); } @@ -2675,6 +2440,9 @@ static void i915_error_work_func(struct work_struct *work) * simulated reset via debugs, so get an RPM reference. */ intel_runtime_pm_get(dev_priv); + + intel_prepare_reset(dev); + /* * All state reset _must_ be completed before we update the * reset counter, for otherwise waiters might miss the reset @@ -2683,7 +2451,7 @@ static void i915_error_work_func(struct work_struct *work) */ ret = i915_reset(dev); - intel_display_handle_reset(dev); + intel_finish_reset(dev); intel_runtime_pm_put(dev_priv); @@ -2763,7 +2531,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) if (eir & I915_ERROR_MEMORY_REFRESH) { pr_err("memory refresh error:\n"); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) pr_err("pipe %c stat: 0x%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); /* pipestat has already been acked */ @@ -2860,52 +2628,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged, schedule_work(&dev_priv->gpu_error.work); } -static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj; - struct intel_unpin_work *work; - unsigned long flags; - bool stall_detected; - - /* Ignore early vblank irqs */ - if (intel_crtc == NULL) - return; - - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - - if (work == NULL || - atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || - !work->enable_stall_check) { - /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } - - /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ - obj = work->pending_flip_obj; - if (INTEL_INFO(dev)->gen >= 4) { - int dspsurf = DSPSURF(intel_crtc->plane); - stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == - i915_gem_obj_ggtt_offset(obj); - } else { - int dspaddr = DSPADDR(intel_crtc->plane); - stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + - crtc->y * crtc->primary->fb->pitches[0] + - crtc->x * crtc->primary->fb->bits_per_pixel/8); - } - - spin_unlock_irqrestore(&dev->event_lock, flags); - - if (stall_detected) { - DRM_DEBUG_DRIVER("Pageflip stall detected\n"); - intel_prepare_page_flip(dev, intel_crtc->plane); - } -} - /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -3354,10 +3076,15 @@ static void i915_hangcheck_elapsed(unsigned long data) void i915_queue_hangcheck(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer; + if (!i915.enable_hangcheck) return; - mod_timer(&dev_priv->gpu_error.hangcheck_timer, + /* Don't continually defer the hangcheck, but make sure it is active */ + if (timer_pending(timer)) + return; + mod_timer(timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); } @@ -3420,10 +3147,22 @@ static void ironlake_irq_reset(struct drm_device *dev) ibx_irq_reset(dev); } +static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + + for_each_pipe(dev_priv, pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + + GEN5_IRQ_RESET(VLV_); +} + static void valleyview_irq_preinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; /* VLV magic */ I915_WRITE(VLV_IMR, 0); @@ -3431,22 +3170,11 @@ static void valleyview_irq_preinstall(struct drm_device *dev) I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); I915_WRITE(RING_IMR(BLT_RING_BASE), 0); - /* and GT */ - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - gen5_gt_irq_reset(dev); - I915_WRITE(DPINVGTT, 0xff); + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IMR, 0xffffffff); - I915_WRITE(VLV_IER, 0x0); - POSTING_READ(VLV_IER); + vlv_display_irq_reset(dev_priv); } static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) @@ -3467,9 +3195,9 @@ static void gen8_irq_reset(struct drm_device *dev) gen8_gt_irq_reset(dev_priv); - for_each_pipe(pipe) - if (intel_display_power_enabled(dev_priv, - POWER_DOMAIN_PIPE(pipe))) + for_each_pipe(dev_priv, pipe) + if (intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); GEN5_IRQ_RESET(GEN8_DE_PORT_); @@ -3481,20 +3209,19 @@ static void gen8_irq_reset(struct drm_device *dev) void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) { - unsigned long irqflags; + uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], - ~dev_priv->de_irq_mask[PIPE_B]); + ~dev_priv->de_irq_mask[PIPE_B] | 
extra_ier); GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], - ~dev_priv->de_irq_mask[PIPE_C]); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); + spin_unlock_irq(&dev_priv->irq_lock); } static void cherryview_irq_preinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); @@ -3503,37 +3230,25 @@ static void cherryview_irq_preinstall(struct drm_device *dev) GEN5_IRQ_RESET(GEN8_PCU_); - POSTING_READ(GEN8_PCU_IIR); - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - - I915_WRITE(VLV_IMR, 0xffffffff); - I915_WRITE(VLV_IER, 0x0); - I915_WRITE(VLV_IIR, 0xffffffff); - POSTING_READ(VLV_IIR); + vlv_display_irq_reset(dev_priv); } static void ibx_hpd_irq_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; u32 hotplug_irqs, hotplug, enabled_irqs = 0; if (HAS_PCH_IBX(dev)) { hotplug_irqs = SDE_HOTPLUG_MASK; - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; } else { hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; } @@ -3596,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); if (INTEL_INFO(dev)->gen >= 6) { - pm_irqs |= dev_priv->pm_rps_events; - + /* + * RPS interrupts will get enabled/disabled on demand when RPS + * itself is enabled/disabled. + */ if (HAS_VEBOX(dev)) pm_irqs |= PM_VEBOX_USER_INTERRUPT; @@ -3608,7 +3325,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) static int ironlake_irq_postinstall(struct drm_device *dev) { - unsigned long irqflags; struct drm_i915_private *dev_priv = dev->dev_private; u32 display_mask, extra_mask; @@ -3647,9 +3363,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev) * spinlocking not required here for correctness since interrupt * setup is guaranteed to run in single-threaded context. But we * need it to make the assert_spin_locked happy. 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } return 0; @@ -3659,45 +3375,51 @@ static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) { u32 pipestat_mask; u32 iir_mask; + enum pipe pipe; pipestat_mask = PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS; - I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); - I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); + for_each_pipe(dev_priv, pipe) + I915_WRITE(PIPESTAT(pipe), pipestat_mask); POSTING_READ(PIPESTAT(PIPE_A)); pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | PIPE_CRC_DONE_INTERRUPT_STATUS; - i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | - PIPE_GMBUS_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(dev_priv, pipe) + i915_enable_pipestat(dev_priv, pipe, pipestat_mask); iir_mask = I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + if (IS_CHERRYVIEW(dev_priv)) + iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; dev_priv->irq_mask &= ~iir_mask; I915_WRITE(VLV_IIR, iir_mask); I915_WRITE(VLV_IIR, iir_mask); - I915_WRITE(VLV_IMR, dev_priv->irq_mask); I915_WRITE(VLV_IER, ~dev_priv->irq_mask); - POSTING_READ(VLV_IER); + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + POSTING_READ(VLV_IMR); } static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) { u32 pipestat_mask; u32 iir_mask; + enum pipe pipe; iir_mask = I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + if (IS_CHERRYVIEW(dev_priv)) + iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; dev_priv->irq_mask |= iir_mask; - I915_WRITE(VLV_IER, ~dev_priv->irq_mask); I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); I915_WRITE(VLV_IIR, iir_mask); I915_WRITE(VLV_IIR, iir_mask); POSTING_READ(VLV_IIR); @@ -3705,14 +3427,15 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | PIPE_CRC_DONE_INTERRUPT_STATUS; - i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | - PIPE_GMBUS_INTERRUPT_STATUS); - i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); + i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(dev_priv, pipe) + i915_disable_pipestat(dev_priv, pipe, pipestat_mask); pipestat_mask = PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS; - I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); - I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); + + for_each_pipe(dev_priv, pipe) + I915_WRITE(PIPESTAT(pipe), pipestat_mask); POSTING_READ(PIPESTAT(PIPE_A)); } @@ -3725,7 +3448,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = true; - if (dev_priv->dev->irq_enabled) + if (intel_irqs_enabled(dev_priv)) valleyview_display_irqs_install(dev_priv); } @@ -3738,34 +3461,36 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = false; - if (dev_priv->dev->irq_enabled) + if (intel_irqs_enabled(dev_priv)) valleyview_display_irqs_uninstall(dev_priv); } -static int valleyview_irq_postinstall(struct drm_device *dev) +static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) { - struct 
drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; - dev_priv->irq_mask = ~0; I915_WRITE(PORT_HOTPLUG_EN, 0); POSTING_READ(PORT_HOTPLUG_EN); - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - I915_WRITE(VLV_IER, ~dev_priv->irq_mask); I915_WRITE(VLV_IIR, 0xffffffff); - POSTING_READ(VLV_IER); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + POSTING_READ(VLV_IMR); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) valleyview_display_irqs_install(dev_priv); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); +} - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IIR, 0xffffffff); +static int valleyview_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + vlv_display_irq_postinstall(dev_priv); gen5_gt_irq_postinstall(dev); @@ -3782,46 +3507,64 @@ static int valleyview_irq_postinstall(struct drm_device *dev) static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) { - int i; - /* These are interrupts we'll toggle with the ring mask register */ uint32_t gt_interrupts[] = { GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 0, - GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT + GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT }; - for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) - GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]); - dev_priv->pm_irq_mask = 0xffffffff; + GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); + GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); + /* + * RPS interrupts will get enabled/disabled on demand when RPS itself + * is enabled/disabled. 
+ */ + GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); + GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); } static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | - GEN8_PIPE_CDCLK_CRC_DONE | - GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | - GEN8_PIPE_FIFO_UNDERRUN; + uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + uint32_t de_pipe_enables; int pipe; + u32 aux_en = GEN8_AUX_CHANNEL_A; + + if (IS_GEN9(dev_priv)) { + de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | + GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + } else + de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | + GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + + de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | + GEN8_PIPE_FIFO_UNDERRUN; + dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; - for_each_pipe(pipe) - if (intel_display_power_enabled(dev_priv, + for_each_pipe(dev_priv, pipe) + if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], de_pipe_enables); - GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); + GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); } static int gen8_irq_postinstall(struct drm_device *dev) @@ -3844,33 +3587,8 @@ static int gen8_irq_postinstall(struct drm_device *dev) static int cherryview_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; - u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | - PIPE_CRC_DONE_INTERRUPT_STATUS; - unsigned long irqflags; - int pipe; - - /* - * Leave vblank interrupts masked initially. enable/disable will - * toggle them based on usage. - */ - dev_priv->irq_mask = ~enable_mask; - - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(pipe) - i915_enable_pipestat(dev_priv, pipe, pipestat_enable); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - I915_WRITE(VLV_IER, enable_mask); + vlv_display_irq_postinstall(dev_priv); gen8_gt_irq_postinstall(dev_priv); @@ -3890,41 +3608,39 @@ static void gen8_irq_uninstall(struct drm_device *dev) gen8_irq_reset(dev); } +static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) +{ + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. 
*/ + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + valleyview_display_irqs_uninstall(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + vlv_display_irq_reset(dev_priv); + + dev_priv->irq_mask = ~0; +} + static void valleyview_irq_uninstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; - int pipe; if (!dev_priv) return; I915_WRITE(VLV_MASTER_IER, 0); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); + gen5_gt_irq_reset(dev); I915_WRITE(HWSTAM, 0xffffffff); - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - if (dev_priv->display_irqs_enabled) - valleyview_display_irqs_uninstall(dev_priv); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - dev_priv->irq_mask = 0; - - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IMR, 0xffffffff); - I915_WRITE(VLV_IER, 0x0); - POSTING_READ(VLV_IER); + vlv_display_irq_uninstall(dev_priv); } static void cherryview_irq_uninstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; if (!dev_priv) return; @@ -3932,44 +3648,11 @@ static void cherryview_irq_uninstall(struct drm_device *dev) I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); -#define GEN8_IRQ_FINI_NDX(type, which) \ -do { \ - I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ - I915_WRITE(GEN8_##type##_IER(which), 0); \ - I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ - POSTING_READ(GEN8_##type##_IIR(which)); \ - I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ -} while (0) - -#define GEN8_IRQ_FINI(type) \ -do { \ - I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ - I915_WRITE(GEN8_##type##_IER, 0); \ - I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ - POSTING_READ(GEN8_##type##_IIR); \ - I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ -} while (0) - - GEN8_IRQ_FINI_NDX(GT, 0); - GEN8_IRQ_FINI_NDX(GT, 1); - GEN8_IRQ_FINI_NDX(GT, 2); - GEN8_IRQ_FINI_NDX(GT, 3); - - GEN8_IRQ_FINI(PCU); - -#undef GEN8_IRQ_FINI -#undef GEN8_IRQ_FINI_NDX - - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + gen8_gt_irq_reset(dev_priv); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); + GEN5_IRQ_RESET(GEN8_PCU_); - I915_WRITE(VLV_IMR, 0xffffffff); - I915_WRITE(VLV_IER, 0x0); - I915_WRITE(VLV_IIR, 0xffffffff); - POSTING_READ(VLV_IIR); + vlv_display_irq_uninstall(dev_priv); } static void ironlake_irq_uninstall(struct drm_device *dev) @@ -3987,7 +3670,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE16(IMR, 0xffff); I915_WRITE16(IER, 0x0); @@ -3997,7 +3680,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev) static int i8xx_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long irqflags; I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -4020,10 +3702,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); return 0; } @@ -4041,9 +3723,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, return false; if ((iir & flip_pending) == 0) - return false; - - intel_prepare_page_flip(dev, plane); + goto check_page_flip; /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip @@ -4052,11 +3732,15 @@ static bool i8xx_handle_vblank(struct drm_device *dev, * an interrupt per se, we watch for the change at vblank. */ if (I915_READ16(ISR) & flip_pending) - return false; + goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); - return true; + +check_page_flip: + intel_check_page_flip(dev, pipe); + return false; } static irqreturn_t i8xx_irq_handler(int irq, void *arg) @@ -4065,7 +3749,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) struct drm_i915_private *dev_priv = dev->dev_private; u16 iir, new_iir; u32 pipe_stats[2]; - unsigned long irqflags; int pipe; u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -4081,13 +3764,11 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false, - "Command parser error, iir 0x%08x", - iir); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); @@ -4097,17 +3778,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & 0x8000ffff) I915_WRITE(reg, pipe_stats[pipe]); } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); I915_WRITE16(IIR, iir & ~flip_mask); new_iir = I915_READ16(IIR); /* Flush posted writes */ - i915_update_dri1_breadcrumb(dev); - if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int plane = pipe; if (HAS_FBC(dev)) plane = !plane; @@ -4119,9 +3798,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); } iir = new_iir; @@ -4135,7 +3814,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { /* Clear enable bits; then clear status bits */ I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); @@ -4156,7 +3835,7 @@ static void i915_irq_preinstall(struct drm_device * dev) } I915_WRITE16(HWSTAM, 0xeffe); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); @@ 
-4167,7 +3846,6 @@ static int i915_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; - unsigned long irqflags; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -4205,10 +3883,10 @@ static int i915_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); return 0; } @@ -4226,9 +3904,7 @@ static bool i915_handle_vblank(struct drm_device *dev, return false; if ((iir & flip_pending) == 0) - return false; - - intel_prepare_page_flip(dev, plane); + goto check_page_flip; /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip @@ -4237,11 +3913,15 @@ static bool i915_handle_vblank(struct drm_device *dev, * an interrupt per se, we watch for the change at vblank. */ if (I915_READ(ISR) & flip_pending) - return false; + goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); - return true; + +check_page_flip: + intel_check_page_flip(dev, pipe); + return false; } static irqreturn_t i915_irq_handler(int irq, void *arg) @@ -4249,7 +3929,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) struct drm_device *dev = arg; struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; - unsigned long irqflags; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; @@ -4265,13 +3944,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false, - "Command parser error, iir 0x%08x", - iir); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); @@ -4281,7 +3958,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) irq_received = true; } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); if (!irq_received) break; @@ -4297,7 +3974,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int plane = pipe; if (HAS_FBC(dev)) plane = !plane; @@ -4312,9 +3989,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -4339,8 +4016,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) iir = new_iir; } while (iir & ~flip_mask); - i915_update_dri1_breadcrumb(dev); - return ret; } @@ -4355,7 +4030,7 @@ static void i915_irq_uninstall(struct drm_device * dev) } I915_WRITE16(HWSTAM, 0xffff); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { /* Clear enable bits; then clear status bits */ I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); @@ -4375,7 +4050,7 @@ static void i965_irq_preinstall(struct drm_device * dev) I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(HWSTAM, 0xeffe); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); @@ -4387,7 +4062,6 @@ static int i965_irq_postinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; u32 error_mask; - unsigned long irqflags; /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | @@ -4408,11 +4082,11 @@ static int i965_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); /* * Enable some error detection, note the instruction error mask @@ -4444,7 +4118,6 @@ static int i965_irq_postinstall(struct drm_device *dev) static void i915_hpd_irq_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; u32 hotplug_en; @@ -4455,7 +4128,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev) hotplug_en &= ~HOTPLUG_INT_EN_MASK; /* Note HDMI and DP share hotplug bits */ /* enable bits are the same for all generations */ - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; /* Programming the CRT detection parameters tends @@ -4478,7 +4151,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; - unsigned long irqflags; int ret = IRQ_NONE, pipe; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -4495,13 +4167,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false, - "Command parser error, iir 0x%08x", - iir); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); @@ -4513,7 +4183,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) irq_received = true; } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); if (!irq_received) break; @@ -4532,7 +4202,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (iir & I915_BSD_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS]); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && i915_handle_vblank(dev, pipe, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); @@ -4543,9 +4213,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -4572,8 +4241,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) iir = new_iir; } - i915_update_dri1_breadcrumb(dev); - return ret; } @@ -4589,30 +4256,29 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(HWSTAM, 0xffffffff); - for_each_pipe(pipe) + 
for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)) & 0x8000ffff); I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_reenable(struct work_struct *work) +static void intel_hpd_irq_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), hotplug_reenable_work.work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; - unsigned long irqflags; int i; intel_runtime_pm_get(dev_priv); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { struct drm_connector *connector; @@ -4636,14 +4302,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work) } if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(dev_priv); } -void intel_irq_init(struct drm_device *dev) +/** + * intel_irq_init - initializes irq support + * @dev_priv: i915 device instance + * + * This function initializes all the irq support including work items, timers + * and all the vtables. It does not setup the interrupt itself though. + */ +void intel_irq_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); @@ -4652,8 +4325,8 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); /* Let's track the enabled rps events */ - if (IS_VALLEYVIEW(dev)) - /* WaGsvRC0ResidenncyMethod:VLV */ + if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) + /* WaGsvRC0ResidencyMethod:vlv */ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; @@ -4662,17 +4335,14 @@ void intel_irq_init(struct drm_device *dev) i915_hangcheck_elapsed, (unsigned long) dev); INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, - intel_hpd_irq_reenable); + intel_hpd_irq_reenable_work); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - /* Haven't installed the IRQ handler yet */ - dev_priv->pm._irqs_disabled = true; - - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { dev->max_vblank_count = 0; dev->driver->get_vblank_counter = i8xx_get_vblank_counter; - } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } else { @@ -4680,12 +4350,20 @@ void intel_irq_init(struct drm_device *dev) dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ } + /* + * Opt out of the vblank disable timer on everything except gen2. + * Gen2 doesn't have a hardware frame counter and so depends on + * vblank interrupts to produce sane vblank seuquence numbers. 
+ */ + if (!IS_GEN2(dev_priv)) + dev->vblank_disable_immediate = true; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; } - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { dev->driver->irq_handler = cherryview_irq_handler; dev->driver->irq_preinstall = cherryview_irq_preinstall; dev->driver->irq_postinstall = cherryview_irq_postinstall; @@ -4693,7 +4371,7 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv)) { dev->driver->irq_handler = valleyview_irq_handler; dev->driver->irq_preinstall = valleyview_irq_preinstall; dev->driver->irq_postinstall = valleyview_irq_postinstall; @@ -4701,7 +4379,7 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_GEN8(dev)) { + } else if (INTEL_INFO(dev_priv)->gen >= 8) { dev->driver->irq_handler = gen8_irq_handler; dev->driver->irq_preinstall = gen8_irq_reset; dev->driver->irq_postinstall = gen8_irq_postinstall; @@ -4718,12 +4396,12 @@ void intel_irq_init(struct drm_device *dev) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; } else { - if (INTEL_INFO(dev)->gen == 2) { + if (INTEL_INFO(dev_priv)->gen == 2) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev)->gen == 3) { + } else if (INTEL_INFO(dev_priv)->gen == 3) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; @@ -4741,12 +4419,23 @@ void intel_irq_init(struct drm_device *dev) } } -void intel_hpd_init(struct drm_device *dev) +/** + * intel_hpd_init - initializes and enables hpd support + * @dev_priv: i915 device instance + * + * This function enables the hotplug support. It requires that interrupts have + * already been enabled with intel_irq_init_hw(). From this point on hotplug and + * poll request can run concurrently to other code, so locking rules must be + * obeyed. + * + * This is a separate step from interrupt enabling to simplify the locking rules + * in the driver load and resume code. + */ +void intel_hpd_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; - unsigned long irqflags; int i; for (i = 1; i < HPD_NUM_PINS; i++) { @@ -4764,27 +4453,72 @@ void intel_hpd_init(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked checks happy. */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } -/* Disable interrupts so we can allow runtime PM. 
*/ -void intel_runtime_pm_disable_interrupts(struct drm_device *dev) +/** + * intel_irq_install - enables the hardware interrupt + * @dev_priv: i915 device instance + * + * This function enables the hardware interrupt handling, but leaves the hotplug + * handling still disabled. It is called after intel_irq_init(). + * + * In the driver load and resume code we need working interrupts in a few places + * but don't want to deal with the hassle of concurrent probe and hotplug + * workers. Hence the split into this two-stage approach. + */ +int intel_irq_install(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + /* + * We enable some interrupt sources in our postinstall hooks, so mark + * interrupts as enabled _before_ actually enabling them to avoid + * special cases in our ordering checks. + */ + dev_priv->pm.irqs_enabled = true; - dev->driver->irq_uninstall(dev); - dev_priv->pm._irqs_disabled = true; + return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); } -/* Restore interrupts so we can recover from runtime PM. */ -void intel_runtime_pm_restore_interrupts(struct drm_device *dev) +/** + * intel_irq_uninstall - finilizes all irq handling + * @dev_priv: i915 device instance + * + * This stops interrupt and hotplug handling and unregisters and frees all + * resources acquired in the init functions. + */ +void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + drm_irq_uninstall(dev_priv->dev); + intel_hpd_cancel_work(dev_priv); + dev_priv->pm.irqs_enabled = false; +} - dev_priv->pm._irqs_disabled = false; - dev->driver->irq_preinstall(dev); - dev->driver->irq_postinstall(dev); +/** + * intel_runtime_pm_disable_interrupts - runtime interrupt disabling + * @dev_priv: i915 device instance + * + * This function is used to disable interrupts at runtime, both in the runtime + * pm and the system suspend/resume code. + */ +void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) +{ + dev_priv->dev->driver->irq_uninstall(dev_priv->dev); + dev_priv->pm.irqs_enabled = false; +} + +/** + * intel_runtime_pm_enable_interrupts - runtime interrupt enabling + * @dev_priv: i915 device instance + * + * This function is used to enable interrupts at runtime, both in the runtime + * pm and the system suspend/resume code. + */ +void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) +{ + dev_priv->pm.irqs_enabled = true; + dev_priv->dev->driver->irq_preinstall(dev_priv->dev); + dev_priv->dev->driver->irq_postinstall(dev_priv->dev); } |
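The gen8_de_irq_postinstall() hunk above builds the per-pipe display-engine interrupt masks in two steps: a generation-dependent set of always-unmasked events (flip-done and fault-error bits differ between gen8 and gen9, and gen9 enables extra AUX channels), plus a wider enable set that additionally covers vblank and FIFO underrun, with IMR programmed as the complement of the always-on set. The standalone sketch below models only that mask arithmetic; the bit positions and names are invented placeholders, not the real register layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit assignments -- illustration only, not the hardware layout. */
#define X_CDCLK_CRC_DONE        (1u << 0)
#define X8_PRIMARY_FLIP_DONE    (1u << 1)
#define X8_FAULT_ERRORS         (1u << 2)
#define X9_PLANE1_FLIP_DONE     (1u << 3)
#define X9_FAULT_ERRORS         (1u << 4)
#define X_VBLANK                (1u << 5)
#define X_FIFO_UNDERRUN         (1u << 6)

int main(void)
{
	bool is_gen9 = true;

	/* Events that stay unmasked in IMR for every pipe. */
	uint32_t de_pipe_masked = X_CDCLK_CRC_DONE;

	if (is_gen9)
		de_pipe_masked |= X9_PLANE1_FLIP_DONE | X9_FAULT_ERRORS;
	else
		de_pipe_masked |= X8_PRIMARY_FLIP_DONE | X8_FAULT_ERRORS;

	/* IER additionally enables vblank and underrun sources; both stay
	 * masked in IMR until something asks for them at runtime. */
	uint32_t de_pipe_enables = de_pipe_masked | X_VBLANK | X_FIFO_UNDERRUN;

	/* The per-pipe IMR value is the complement of the always-on events. */
	uint32_t de_irq_mask = ~de_pipe_masked;

	printf("IMR=0x%08x IER=0x%08x\n", de_irq_mask, de_pipe_enables);
	return 0;
}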
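Several hunks above drop spin_lock_irqsave()/spin_unlock_irqrestore() in favour of spin_lock_irq() in the postinstall paths and plain spin_lock() inside the interrupt handlers. The underlying point is that the caller's local interrupt state is already known: the postinstall hooks run in process context with interrupts on, while the hard interrupt handlers run with them off, so saving and restoring flags is redundant. The single-CPU user-space model below only illustrates that bookkeeping; the toy_* helpers are hypothetical stand-ins for the kernel primitives.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: one flag for the local interrupt state, one for the lock. */
static bool local_irqs_enabled = true;
static bool irq_lock_held;

/* spin_lock_irqsave(): legal in any context, remembers the prior state. */
static bool toy_lock_irqsave(void)
{
	bool flags = local_irqs_enabled;
	local_irqs_enabled = false;
	irq_lock_held = true;
	return flags;
}

static void toy_unlock_irqrestore(bool flags)
{
	irq_lock_held = false;
	local_irqs_enabled = flags;
}

/* spin_lock_irq(): only valid when the caller knows interrupts are on,
 * e.g. the ->irq_postinstall() hooks running in process context. */
static void toy_lock_irq(void)
{
	assert(local_irqs_enabled);
	local_irqs_enabled = false;
	irq_lock_held = true;
}

static void toy_unlock_irq(void)
{
	irq_lock_held = false;
	local_irqs_enabled = true;
}

/* Plain spin_lock(): only valid when interrupts are already off, which is
 * exactly the situation inside a hard interrupt handler. */
static void toy_lock(void)
{
	assert(!local_irqs_enabled);
	irq_lock_held = true;
}

static void toy_unlock(void)
{
	irq_lock_held = false;
}

int main(void)
{
	/* Process context (postinstall): irqs are on, so _irq is enough. */
	toy_lock_irq();
	toy_unlock_irq();

	/* Hard irq context: local interrupts were already disabled. */
	local_irqs_enabled = false;
	toy_lock();
	toy_unlock();
	local_irqs_enabled = true;

	/* The irqsave variant still works everywhere, it is just redundant. */
	bool flags = toy_lock_irqsave();
	toy_unlock_irqrestore(flags);

	printf("lock held at exit: %d\n", irq_lock_held);
	return 0;
}

Beyond the marginally cheaper lock path, the conversion documents in which context each function expects to run, which is why the patch applies it consistently across the postinstall, uninstall and handler code.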
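The i8xx_handle_vblank() and i915_handle_vblank() hunks reroute every early exit through a new stuck-flip check instead of returning immediately, and only prepare and finish the flip once the pending bit reported in IIR has cleared in ISR. The sketch below reproduces that control flow with stand-in helpers; FLIP_PENDING, read_isr(), prepare_flip(), finish_flip() and check_stuck_flip() are all placeholders for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLIP_PENDING (1u << 7)          /* placeholder bit, not the real one */

static uint32_t fake_isr;               /* stands in for the ISR register */

static uint32_t read_isr(void)          { return fake_isr; }
static void prepare_flip(int pipe)      { printf("prepare flip on pipe %d\n", pipe); }
static void finish_flip(int pipe)       { printf("finish flip on pipe %d\n", pipe); }
static void check_stuck_flip(int pipe)  { printf("check pipe %d for a stuck flip\n", pipe); }

/* Mirrors the patched control flow: the flip is completed only when IIR
 * reported it pending and ISR shows the pending bit has since cleared;
 * every other path falls through to a stuck-flip check instead of
 * silently bailing out. */
static bool handle_vblank_flip(int pipe, uint32_t iir)
{
	if ((iir & FLIP_PENDING) == 0)
		goto check_page_flip;

	/* FlipDone is the pending bit going from 1 to 0 across a vblank;
	 * still set in ISR means the flip has not completed yet. */
	if (read_isr() & FLIP_PENDING)
		goto check_page_flip;

	prepare_flip(pipe);
	finish_flip(pipe);
	return true;

check_page_flip:
	check_stuck_flip(pipe);
	return false;
}

int main(void)
{
	fake_isr = FLIP_PENDING;                 /* still pending: only check */
	handle_vblank_flip(0, FLIP_PENDING);
	fake_isr = 0;                            /* cleared: complete the flip */
	handle_vblank_flip(0, FLIP_PENDING);
	return 0;
}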
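A change that recurs throughout the patch is that for_each_pipe() now takes dev_priv, so the loop bound comes from the device being iterated rather than a file-global pipe count. A simplified user-space rendering of that macro shape, with the structure layout and names invented for the example:

#include <stdio.h>

struct toy_i915_private {
	int num_pipes;          /* filled in from the platform description */
};

/* Simplified stand-in for the driver macro: iterate 0..num_pipes-1 of the
 * device that was passed in, instead of assuming a fixed global count. */
#define for_each_toy_pipe(dev_priv, p) \
	for ((p) = 0; (p) < (dev_priv)->num_pipes; (p)++)

int main(void)
{
	struct toy_i915_private dev_priv = { .num_pipes = 3 };
	int pipe;

	for_each_toy_pipe(&dev_priv, pipe)
		printf("reset PIPESTAT for pipe %d\n", pipe);
	return 0;
}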
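intel_hpd_irq_reenable_work() above brackets its pin scan with intel_runtime_pm_get()/intel_runtime_pm_put() and, now that it runs as ordinary delayed work in process context, takes the irq lock with spin_lock_irq(). The outline below shows that shape in plain C; the pin bookkeeping and helper names are invented for illustration and do not mirror the driver's connector handling.

#include <stdio.h>

#define NUM_PINS 6

enum pin_state { PIN_ENABLED, PIN_MARK_DISABLED };

static enum pin_state pins[NUM_PINS];

static void runtime_pm_get(void)  { printf("hold the device awake\n"); }
static void runtime_pm_put(void)  { printf("allow the device to sleep\n"); }
static void irq_lock(void)        { printf("spin_lock_irq(&irq_lock)\n"); }
static void irq_unlock(void)      { printf("spin_unlock_irq(&irq_lock)\n"); }
static void hpd_irq_setup(void)   { printf("reprogram hotplug enables\n"); }

/* Delayed-work body: storm-disabled pins are put back into the enabled set
 * and the hotplug registers reprogrammed, with the device held awake and
 * the interrupt bookkeeping protected by the irq lock throughout. */
static void hpd_reenable_work(void)
{
	int i;

	runtime_pm_get();
	irq_lock();
	for (i = 0; i < NUM_PINS; i++)
		if (pins[i] == PIN_MARK_DISABLED)
			pins[i] = PIN_ENABLED;
	hpd_irq_setup();
	irq_unlock();
	runtime_pm_put();
}

int main(void)
{
	pins[2] = PIN_MARK_DISABLED;
	hpd_reenable_work();
	return 0;
}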
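The intel_irq_init() hunk also selects the vblank counter per platform (none on gen2, a full 32-bit counter on g4x and gen5+, 24 bits otherwise) and opts every platform except gen2 into immediate vblank disabling, since gen2 has no hardware frame counter and needs the interrupts to keep sequence numbers sane. A small sketch of that selection; the generation checks follow the hunk, the surrounding scaffolding is invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vblank_config {
	uint32_t max_vblank_count;     /* 0 means "no usable hw counter" */
	bool disable_immediate;        /* ok to turn vblank irqs off at once */
};

static struct vblank_config pick_vblank_config(int gen, bool is_g4x)
{
	struct vblank_config c;

	if (gen == 2)
		c.max_vblank_count = 0;              /* no hw frame counter */
	else if (is_g4x || gen >= 5)
		c.max_vblank_count = 0xffffffff;     /* full 32-bit counter */
	else
		c.max_vblank_count = 0xffffff;       /* 24 bits of frame count */

	/* Without a hardware counter, gen2 must keep vblank interrupts on to
	 * produce sane sequence numbers, so it cannot disable them eagerly. */
	c.disable_immediate = (gen != 2);
	return c;
}

int main(void)
{
	struct vblank_config c = pick_vblank_config(8, false);

	printf("max=0x%08x immediate=%d\n", c.max_vblank_count,
	       c.disable_immediate);
	return 0;
}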
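The new kernel-doc comments describe a staged bring-up: intel_irq_init() prepares work items, timers and vtables without touching hardware, intel_irq_install() marks interrupts as enabled before requesting the IRQ (because the postinstall hooks already unmask sources and would otherwise trip the ordering checks), and intel_hpd_init() enables hotplug only once interrupts are live. The compilable toy below captures that ordering; every name and stub in it is hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	bool irqs_enabled;      /* stands in for dev_priv->pm.irqs_enabled */
	bool hotplug_enabled;
};

static void toy_irq_init(struct toy_dev *d)
{
	/* Work items, timers and vtables would be set up here; no hardware
	 * access yet, so interrupts stay marked as disabled. */
	d->irqs_enabled = false;
	d->hotplug_enabled = false;
}

static void toy_postinstall(struct toy_dev *d)
{
	/* The real postinstall hooks unmask interrupt sources and expect the
	 * "enabled" bookkeeping to already be true when they run. */
	assert(d->irqs_enabled);
}

static int toy_irq_install(struct toy_dev *d)
{
	/* Mark interrupts enabled *before* installing, precisely because the
	 * postinstall hook will already enable some sources. */
	d->irqs_enabled = true;
	toy_postinstall(d);
	return 0;
}

static void toy_hpd_init(struct toy_dev *d)
{
	/* Hotplug comes up only once the interrupt is live, which keeps the
	 * locking in the load and resume paths simple. */
	assert(d->irqs_enabled);
	d->hotplug_enabled = true;
}

int main(void)
{
	struct toy_dev d;

	toy_irq_init(&d);
	toy_irq_install(&d);
	toy_hpd_init(&d);
	printf("irq=%d hotplug=%d\n", d.irqs_enabled, d.hotplug_enabled);
	return 0;
}

Splitting hotplug enabling out of interrupt installation is the design choice the kernel-doc calls out: probe and resume can have working interrupts without yet having to cope with concurrent hotplug or poll work.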
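intel_runtime_pm_disable_interrupts() and intel_runtime_pm_enable_interrupts() reuse the driver's own irq_uninstall and irq_preinstall/irq_postinstall hooks while flipping the same enabled flag, so the runtime-PM and system suspend paths end up in the same interrupt state as a fresh driver load. A suspend/resume-shaped sketch under the same toy assumptions, with all helpers hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static void toy_irq_uninstall_hw(void)   { printf("mask and clear all IIR/IER\n"); }
static void toy_irq_preinstall_hw(void)  { printf("reset interrupt registers\n"); }
static void toy_irq_postinstall_hw(void) { printf("re-enable interrupt sources\n"); }

/* Runtime suspend: quiesce the hardware via the normal uninstall hook and
 * record that interrupts are off so later checks can warn on misuse. */
static void toy_runtime_pm_disable_interrupts(void)
{
	toy_irq_uninstall_hw();
	irqs_enabled = false;
}

/* Runtime resume: flag first, then run the usual preinstall/postinstall
 * sequence so the hardware matches the state after driver load. */
static void toy_runtime_pm_enable_interrupts(void)
{
	irqs_enabled = true;
	toy_irq_preinstall_hw();
	toy_irq_postinstall_hw();
}

int main(void)
{
	toy_runtime_pm_disable_interrupts();
	toy_runtime_pm_enable_interrupts();
	printf("irqs_enabled=%d\n", irqs_enabled);
	return 0;
}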