author    Linus Torvalds <torvalds@linux-foundation.org>    2019-05-08 21:35:19 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-05-08 21:35:19 -0700
commit    a2d635decbfa9c1e4ae15cb05b68b2559f7f827c (patch)
tree      1c3766c35215450ff9e4228efed578d5e6ba65d1 /drivers/gpu/drm/i915/i915_irq.c
parent    89c3b37af87ec183b666d83428cb28cc421671a6 (diff)
parent    eb85d03e01c3e9f3b0ba7282b2e3515a635decb2 (diff)
Merge tag 'drm-next-2019-05-09' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This has two exciting community drivers for ARM Mali accelerators.
  Since ARM has never been open source friendly on the GPU side of the
  house, the community has had to create open source drivers for the
  Mali GPUs. Lima covers the older t4xx and panfrost the newer 6xx/7xx
  series. Well done to all involved and hopefully this will help ARM
  head in the right direction.

  There is also now the ability, if you don't have any of the legacy
  (pre-KMS) drivers enabled, to remove all the pre-KMS support code
  from the core drm; this saves 10% or so in codesize on my machine.

  i915 also enables Icelake/Elkhart Lake Gen11 GPUs by default, and
  vboxvideo moves out of staging.

  There are also some rcar-du patches which cross over with the media
  tree, but all should be acked by Mauro.

  Summary:

  uapi changes:
   - Colorspace connector property
   - fourcc - new YUV formats
   - timeline sync objects initially merged
   - expose FB_DAMAGE_CLIPS to atomic userspace

  new drivers:
   - vboxvideo: moved out of staging
   - aspeed: ASPEED SoC BMC chip display support
   - lima: ARM Mali4xx GPU acceleration driver support
   - panfrost: ARM Mali6xx/7xx Midgard/Bifrost acceleration driver support

  core:
   - component helper docs
   - unplugging fixes
   - devm device init
   - MIPI/DSI rate control
   - shmem backed gem objects
   - connector, display_info, edid_quirks cleanups
   - dma_buf fence chain support
   - 64-bit dma-fence seqno comparison fixes
   - move initial fb config code to core
   - gem fence array helpers for Lima
   - ability to remove legacy support code if no driver requires it
     (removes 10% of drm.ko size)
   - lease fixes

  ttm:
   - unified DRM_FILE_PAGE_OFFSET handling
   - Account for kernel allocations in kernel zone only

  panel:
   - OSD070T1718-19TS panel support
   - panel-tpo-td028ttec1 backlight support
   - Ronbo RB070D30 MIPI/DSI
   - Feiyang FY07024DI26A30-D MIPI-DSI panel
   - Rocktech jh057n00900 MIPI-DSI panel

  i915:
   - Comet Lake (Gen9) PCI IDs
   - Updated Icelake PCI IDs
   - Elkhartlake (Gen11) support
   - DP MST property additions
   - plane and watermark fixes
   - Icelake port sync and VEBOX disable fixes
   - struct_mutex usage reduction
   - Icelake gamma fix
   - GuC reset fixes
   - make mmap more asynchronous
   - sound display power well race fixes
   - DDI/MIPI-DSI clocks for Icelake
   - Icelake RPS frequency changing support
   - Icelake workarounds

  amdgpu:
   - Use HMM for userptr
   - vega20 experimental smu11 support
   - RAS support for vega20
   - BACO support for vega12 + fixes for vega20
   - reworked IH interrupt handling
   - amdkfd RAS support
   - Freesync improvements
   - initial timeline sync object support
   - DC Z ordering fixes
   - NV12 planes support
   - colorspace properties for planes
   - eDP opts if eDP already initialized

  nouveau:
   - misc fixes

  etnaviv:
   - misc fixes

  msm:
   - GPU zap shader support expansion
   - robustness ABI addition

  exynos:
   - Logging cleanups

  tegra:
   - Shared reset fix
   - CPU cache maintenance fix

  cirrus:
   - driver rewritten using simple helpers

  meson:
   - G12A support

  vmwgfx:
   - Resource dirtying management improvements
   - Userspace logging improvements

  virtio:
   - PRIME fixes

  rockchip:
   - rk3066 hdmi support

  sun4i:
   - DSI burst mode support

  vc4:
   - load tracker to detect underflow

  v3d:
   - v3d v4.2 support

  malidp:
   - initial Mali D71 support in komeda driver

  tfp410:
   - omap related improvement

  omapdrm:
   - drm bridge/panel support
   - drop some omap specific panels

  rcar-du:
   - Display writeback support"

* tag 'drm-next-2019-05-09' of git://anongit.freedesktop.org/drm/drm: (1507 commits)
  drm/msm/a6xx: No zap shader is not an error
  drm/cma-helper: Fix drm_gem_cma_free_object()
  drm: Fix timestamp docs for variable refresh properties.
  drm/komeda: Mark the local functions as static
  drm/komeda: Fixed warning: Function parameter or member not described
  drm/komeda: Expose bus_width to Komeda-CORE
  drm/komeda: Add sysfs attribute: core_id and config_id
  drm: add non-desktop quirk for Valve HMDs
  drm/panfrost: Show stored feature registers
  drm/panfrost: Don't scream about deferred probe
  drm/panfrost: Disable PM on probe failure
  drm/panfrost: Set DMA masks earlier
  drm/panfrost: Add sanity checks to submit IOCTL
  drm/etnaviv: initialize idle mask before querying the HW db
  drm: introduce a capability flag for syncobj timeline support
  drm: report consistent errors when checking syncobj capibility
  drm/nouveau/nouveau: forward error generated while resuming objects tree
  drm/nouveau/fb/ramgk104: fix spelling mistake "sucessfully" -> "successfully"
  drm/nouveau/i2c: Disable i2c bus access after ->fini()
  drm/nouveau: Remove duplicate ACPI_VIDEO_NOTIFY_PROBE definition
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_irq.c   665
1 file changed, 432 insertions(+), 233 deletions(-)
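
The bulk of the churn in this file comes from converting the GEN2/GEN3/GEN8 IRQ reset and init macros, which used to expand I915_WRITE()/POSTING_READ() against dev_priv, into real functions that take a struct intel_uncore plus explicit register offsets. A condensed before/after sketch of that pattern, assembled from the hunks below (abridged for illustration, not a buildable excerpt on its own):

    /* Before: the macro expands raw MMIO writes against dev_priv. */
    #define GEN3_IRQ_RESET(type) do { \
            I915_WRITE(type##IMR, 0xffffffff); \
            POSTING_READ(type##IMR); \
            I915_WRITE(type##IER, 0); \
            I915_WRITE(type##IIR, 0xffffffff); \
            POSTING_READ(type##IIR); \
            I915_WRITE(type##IIR, 0xffffffff); \
            POSTING_READ(type##IIR); \
    } while (0)

    GEN3_IRQ_RESET(SDE);    /* caller, e.g. ibx_irq_reset() */

    /* After: a plain function takes the uncore and the IMR/IIR/IER
     * registers; the macro only stitches the register names together.
     */
    static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
                               i915_reg_t iir, i915_reg_t ier)
    {
            intel_uncore_write(uncore, imr, 0xffffffff);
            intel_uncore_posting_read(uncore, imr);

            intel_uncore_write(uncore, ier, 0);

            /* IIR can theoretically queue up two events. Be paranoid. */
            intel_uncore_write(uncore, iir, 0xffffffff);
            intel_uncore_posting_read(uncore, iir);
            intel_uncore_write(uncore, iir, 0xffffffff);
            intel_uncore_posting_read(uncore, iir);
    }

    #define GEN3_IRQ_RESET(uncore, type) \
            gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)

    struct intel_uncore *uncore = &dev_priv->uncore;
    GEN3_IRQ_RESET(uncore, SDE);    /* caller now names the uncore */

The GEN2_* and GEN8_*_NDX variants and the matching *_IRQ_INIT macros follow the same shape, which is why most of the 432 insertions / 233 deletions here are call-site churn rather than behavioural change.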
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 441d2674b272..b92cfd69134b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -28,15 +28,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/sysrq.h>
-#include <linux/slab.h>
#include <linux/circ_buf.h>
-#include <drm/drm_irq.h>
+#include <linux/cpuidle.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
#include <drm/drm_drv.h>
+#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_psr.h"
/**
* DOC: interrupt handling
@@ -132,92 +136,120 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
-/* IIR can theoretically queue up two events. Be paranoid. */
-#define GEN8_IRQ_RESET_NDX(type, which) do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
-} while (0)
-
-#define GEN3_IRQ_RESET(type) do { \
- I915_WRITE(type##IMR, 0xffffffff); \
- POSTING_READ(type##IMR); \
- I915_WRITE(type##IER, 0); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
-} while (0)
-
-#define GEN2_IRQ_RESET(type) do { \
- I915_WRITE16(type##IMR, 0xffff); \
- POSTING_READ16(type##IMR); \
- I915_WRITE16(type##IER, 0); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
-} while (0)
+static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
+{
+ intel_uncore_write(uncore, imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, imr);
+
+ intel_uncore_write(uncore, ier, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+}
+
+static void gen2_irq_reset(struct intel_uncore *uncore)
+{
+ intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+
+ intel_uncore_write16(uncore, GEN2_IER, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_uncore_read(uncore, reg);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
i915_mmio_reg_offset(reg), val);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
}
-static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
- u16 val = I915_READ16(reg);
+ u16 val = intel_uncore_read16(uncore, GEN2_IIR);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
-}
-
-#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
-} while (0)
-
-#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE(type##IER, (ier_val)); \
- I915_WRITE(type##IMR, (imr_val)); \
- POSTING_READ(type##IMR); \
-} while (0)
-
-#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
- gen2_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE16(type##IER, (ier_val)); \
- I915_WRITE16(type##IMR, (imr_val)); \
- POSTING_READ16(type##IMR); \
-} while (0)
+ i915_mmio_reg_offset(GEN2_IIR), val);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+static void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
+{
+ gen3_assert_iir_is_zero(uncore, iir);
+
+ intel_uncore_write(uncore, ier, ier_val);
+ intel_uncore_write(uncore, imr, imr_val);
+ intel_uncore_posting_read(uncore, imr);
+}
+
+static void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
+{
+ gen2_assert_iir_is_zero(uncore);
+
+ intel_uncore_write16(uncore, GEN2_IER, ier_val);
+ intel_uncore_write16(uncore, GEN2_IMR, imr_val);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+}
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
@@ -268,7 +300,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
const unsigned int bank,
const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 dw;
lockdep_assert_held(&i915->irq_lock);
@@ -365,24 +397,41 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_MASK;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IMR(2);
- else
- return GEN6_PMIMR;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_imr;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ I915_WRITE(reg, mask);
+ POSTING_READ(reg);
}
-static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IER(2);
- else
- return GEN6_PMIER;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_ier;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ I915_WRITE(reg, mask);
}
/**
@@ -407,8 +456,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->pm_imr) {
dev_priv->pm_imr = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
- POSTING_READ(gen6_pm_imr(dev_priv));
+ write_pm_imr(dev_priv);
}
}
@@ -449,7 +497,7 @@ static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mas
lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
gen6_unmask_pm_irq(dev_priv, enable_mask);
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
@@ -460,7 +508,7 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
/* though a barrier is missing here, but don't really need a one */
}
@@ -748,13 +796,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
+static bool i915_has_asle(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->opregion.asle)
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev_priv: i915 device private
*/
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
+ if (!i915_has_asle(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
@@ -1288,6 +1344,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = adj;
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
@@ -1415,20 +1483,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1449,7 +1517,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_breadcrumbs_irq(engine);
- tasklet |= USES_GUC_SUBMISSION(engine->i915);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
if (tasklet)
@@ -1459,12 +1527,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VCS2_IRQ | \
GEN8_GT_VECS_IRQ | \
GEN8_GT_PM_IRQ | \
GEN8_GT_GUC_IRQ)
@@ -1475,7 +1543,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(gt_iir[1]))
raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
@@ -1498,21 +1566,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS],
+ gen8_cs_irq_handler(i915->engine[RCS0],
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS],
+ gen8_cs_irq_handler(i915->engine[BCS0],
gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS],
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[VCS0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ gen8_cs_irq_handler(i915->engine[VCS1],
gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS2],
- gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS],
+ gen8_cs_irq_handler(i915->engine[VECS0],
gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
@@ -1693,7 +1761,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- u32 crcs[5];
+ u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
+
+ trace_intel_pipe_crc(crtc, crcs);
spin_lock(&pipe_crc->lock);
/*
@@ -1712,11 +1782,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
drm_crtc_accurate_vblank_count(&crtc->base),
crcs);
@@ -1775,6 +1840,25 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
+static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+ const u32 events = i915->pm_rps_events & pm_iir;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_mask_pm_irq(i915, events);
+
+ if (!rps->interrupts_enabled)
+ return;
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -1792,13 +1876,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_GEN(dev_priv) >= 8)
return;
- if (HAS_VEBOX(dev_priv)) {
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
@@ -2667,6 +2749,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = GEN8_AUX_CHANNEL_A;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ mask |= GEN9_AUX_CHANNEL_B |
+ GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ mask |= CNL_AUX_CHANNEL_F;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= ICL_AUX_CHANNEL_E |
+ CNL_AUX_CHANNEL_F;
+
+ return mask;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2722,20 +2823,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
- tmp_mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_GEN(dev_priv) >= 9)
- tmp_mask |= GEN9_AUX_CHANNEL_B |
- GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
-
- if (INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= ICL_AUX_CHANNEL_E;
-
- if (IS_CNL_WITH_PORT_F(dev_priv) ||
- INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= CNL_AUX_CHANNEL_F;
-
- if (iir & tmp_mask) {
+ if (iir & gen8_de_port_aux_mask(dev_priv)) {
dp_aux_irq_handler(dev_priv);
found = true;
}
@@ -2816,11 +2904,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2857,7 +2943,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -2891,7 +2977,7 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 timeout_ts;
u32 ident;
@@ -2926,7 +3012,7 @@ gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
if (instance == OTHER_GTPM_INSTANCE)
- return gen6_rps_irq_handler(i915, iir);
+ return gen11_rps_irq_handler(i915, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -2975,7 +3061,7 @@ static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
const unsigned int bank)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
unsigned long intr_dw;
unsigned int bit;
@@ -2983,14 +3069,8 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (unlikely(!intr_dw)) {
- DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
- return;
- }
-
for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915,
- bank, bit);
+ const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
gen11_gt_identity_handler(i915, ident);
}
@@ -3018,7 +3098,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3059,7 +3139,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 master_ctl;
u32 gu_misc_iir;
@@ -3112,6 +3192,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
+static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (dev_priv->i945gm_vblank.enabled++ == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+
+ return i8xx_enable_vblank(dev, pipe);
+}
+
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3176,6 +3266,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ i8xx_disable_vblank(dev, pipe);
+
+ if (--dev_priv->i945gm_vblank.enabled == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+}
+
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3209,12 +3309,68 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_vblank_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, i945gm_vblank.work);
+
+ /*
+ * Vblank interrupts fail to wake up the device from C3,
+ * hence we want to prevent C3 usage while vblank interrupts
+ * are enabled.
+ */
+ pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
+ READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
+ dev_priv->i945gm_vblank.c3_disable_latency :
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static int cstate_disable_latency(const char *name)
+{
+ const struct cpuidle_driver *drv;
+ int i;
+
+ drv = cpuidle_get_driver();
+ if (!drv)
+ return 0;
+
+ for (i = 0; i < drv->state_count; i++) {
+ const struct cpuidle_state *state = &drv->states[i];
+
+ if (!strcmp(state->name, name))
+ return state->exit_latency ?
+ state->exit_latency - 1 : 0;
+ }
+
+ return 0;
+}
+
+static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
+{
+ INIT_WORK(&dev_priv->i945gm_vblank.work,
+ i945gm_vblank_work_func);
+
+ dev_priv->i945gm_vblank.c3_disable_latency =
+ cstate_disable_latency("C3");
+ pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
+{
+ cancel_work_sync(&dev_priv->i945gm_vblank.work);
+ pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
+}
+
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (HAS_PCH_NOP(dev_priv))
return;
- GEN3_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -3242,13 +3398,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN3_IRQ_RESET(GT);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -3259,12 +3419,14 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(uncore, VLV_);
dev_priv->irq_mask = ~0u;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -3289,7 +3451,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -3297,8 +3459,9 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
- GEN3_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
@@ -3329,18 +3492,21 @@ static void valleyview_irq_reset(struct drm_device *dev)
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN8_IRQ_RESET_NDX(GT, 0);
- GEN8_IRQ_RESET_NDX(GT, 1);
- GEN8_IRQ_RESET_NDX(GT, 2);
- GEN8_IRQ_RESET_NDX(GT, 3);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen8_master_intr_disable(dev_priv->regs);
+ gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(dev_priv);
@@ -3350,11 +3516,11 @@ static void gen8_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3380,9 +3546,10 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen11_master_intr_disable(dev_priv->regs);
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(dev_priv);
@@ -3394,21 +3561,23 @@ static void gen11_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN11_DE_HPD_);
- GEN3_IRQ_RESET(GEN11_GU_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- if (HAS_PCH_ICP(dev_priv))
- GEN3_IRQ_RESET(SDE);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ GEN3_IRQ_RESET(uncore, SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
@@ -3420,7 +3589,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
@@ -3430,6 +3599,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -3440,7 +3610,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -3451,13 +3621,14 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
static void cherryview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_reset(dev_priv);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3583,7 +3754,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@@ -3729,7 +3900,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
else
mask = SDE_GMBUS_CPT;
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3742,6 +3913,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 pm_irqs, gt_irqs;
pm_irqs = gt_irqs = 0;
@@ -3760,26 +3932,27 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_ENGINE(dev_priv, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
if (INTEL_GEN(dev_priv) >= 7) {
@@ -3798,7 +3971,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
}
if (IS_HASWELL(dev_priv)) {
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3807,7 +3980,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_pre_postinstall(dev);
- GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
+ display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3877,35 +4051,42 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
0,
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
- };
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
- GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. Same wil be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
- GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
@@ -3941,7 +4122,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
for_each_pipe(dev_priv, pipe) {
@@ -3949,20 +4130,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
}
- GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
if (INTEL_GEN(dev_priv) >= 11) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
+ de_hpd_enables);
gen11_hpd_detection_setup(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
@@ -3984,7 +4166,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- gen8_master_intr_enable(dev_priv->regs);
+ gen8_master_intr_enable(dev_priv->uncore.regs);
return 0;
}
@@ -4025,7 +4207,7 @@ static void icp_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
icp_hpd_detection_setup(dev_priv);
@@ -4034,19 +4216,20 @@ static void icp_irq_postinstall(struct drm_device *dev)
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->regs);
+ gen11_master_intr_enable(dev_priv->uncore.regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
@@ -4072,15 +4255,17 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void i8xx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
- GEN2_IRQ_RESET();
+ GEN2_IRQ_RESET(uncore);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4098,7 +4283,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
- GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4202,7 +4387,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 eir = 0, eir_stuck = 0;
u16 iir;
- iir = I915_READ16(IIR);
+ iir = I915_READ16(GEN2_IIR);
if (iir == 0)
break;
@@ -4215,10 +4400,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE16(IIR, iir);
+ I915_WRITE16(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4234,6 +4419,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
static void i915_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4242,12 +4428,13 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4274,7 +4461,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4306,7 +4493,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4323,10 +4510,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4345,18 +4532,20 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4394,7 +4583,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4452,7 +4641,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4468,13 +4657,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4503,6 +4692,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
+ if (IS_I945GM(dev_priv))
+ i945gm_vblank_work_init(dev_priv);
+
intel_hpd_init_work(dev_priv);
INIT_WORK(&rps->work, gen6_pm_rps_work);
@@ -4523,6 +4715,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
+ /* We share the register with other engine */
+ if (INTEL_GEN(dev_priv) > 9)
+ GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
+
rps->pm_intrmsk_mbz = 0;
/*
@@ -4542,13 +4738,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 3)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
- /*
- * Opt out of the vblank disable timer on everything except gen2.
- * Gen2 doesn't have a hardware frame counter and so depends on
- * vblank interrupts to produce sane vblank seuquence numbers.
- */
- if (!IS_GEN(dev_priv, 2))
- dev->vblank_disable_immediate = true;
+ dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -4605,8 +4795,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4626,6 +4815,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
+ } else if (IS_I945GM(dev_priv)) {
+ dev->driver->irq_preinstall = i915_irq_reset;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_reset;
+ dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i945gm_enable_vblank;
+ dev->driver->disable_vblank = i945gm_disable_vblank;
} else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
@@ -4656,6 +4852,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
+ if (IS_I945GM(i915))
+ i945gm_vblank_work_fini(i915);
+
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}