Diffstat (limited to 'drivers/gpu/drm/i915/gvt/interrupt.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 142
1 file changed, 81 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 7a041b368f68..3e66269bc4ee 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -29,10 +29,32 @@
*
*/
+#include <linux/eventfd.h>
+
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "display/intel_display_regs.h"
#include "gvt.h"
#include "trace.h"
+struct intel_gvt_irq_info {
+ char *name;
+ i915_reg_t reg_base;
+ enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
+ int group;
+ DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
+ bool has_upstream_irq;
+};
+
+struct intel_gvt_irq_map {
+ int up_irq_group;
+ int up_irq_bit;
+ int down_irq_group;
+ u32 down_irq_bitmask;
+};
+
/* common offset among interrupt control registers */
#define regbase_to_isr(base) (base)
#define regbase_to_imr(base) (base + 0x4)
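
The two structures added above carry the data the IRQ emulation works from: per-register-block state (intel_gvt_irq_info) and a table that ties one bit of an upstream, aggregating interrupt register to a mask of bits in a downstream register (intel_gvt_irq_map). As a rough illustration of how such a map entry reads, here is a hypothetical entry; the field names and group identifiers come from this file, but the bit and mask values are made up and need not match the real gen8_irq_map table:

/*
 * Illustrative only: "bit 2 of the master interrupt register summarizes
 * the low 16 bits of the GT1 interrupt register". The terminating entry
 * is an assumption about how the table ends.
 */
static struct intel_gvt_irq_map example_irq_map[] = {
        {
                .up_irq_group     = INTEL_GVT_IRQ_INFO_MASTER,
                .up_irq_bit       = 2,
                .down_irq_group   = INTEL_GVT_IRQ_INFO_GT1,
                .down_irq_bitmask = 0xffff,
        },
        { .up_irq_bit = -1 },   /* assumed terminator */
};
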
@@ -126,7 +148,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
- [ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+ [ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
[GMBUS] = "Gmbus",
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
[CRT_HOTPLUG] = "CRT Hotplug",
@@ -176,7 +198,7 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ const struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 imr = *(u32 *)p_data;
trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
@@ -206,7 +228,7 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ const struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 ier = *(u32 *)p_data;
u32 virtual_ier = vgpu_vreg(vgpu, reg);
@@ -245,7 +267,8 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ const struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data;
@@ -255,7 +278,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
if (info->has_upstream_irq)
@@ -282,6 +305,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
@@ -289,7 +313,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ iir));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
@@ -319,6 +343,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -340,7 +365,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
- WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+ drm_WARN_ON(&i915->drm, up_irq_info !=
+ irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
@@ -350,7 +376,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- WARN_ON(!up_irq_info);
+ if (drm_WARN_ON(&i915->drm, !up_irq_info))
+ return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -391,9 +418,44 @@ static void init_irq_map(struct intel_gvt_irq *irq)
}
/* =======================vEvent injection===================== */
-static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+
+#define MSI_CAP_CONTROL(offset) (offset + 2)
+#define MSI_CAP_ADDRESS(offset) (offset + 4)
+#define MSI_CAP_DATA(offset) (offset + 8)
+#define MSI_CAP_EN 0x1
+
+static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
- return intel_gvt_hypervisor_inject_msi(vgpu);
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return;
+
+ if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+ return;
+
+ trace_inject_msi(vgpu->id, addr, data);
+
+ /*
+ * When the guest is powered off, msi_trigger is set to NULL, but the
+ * vGPU's config space and MMIO registers are not restored to their
+ * defaults at poweroff. If this vGPU is reused for the next VM, its
+ * pipes may still be enabled, so once the vGPU becomes active it will
+ * receive vblank interrupt requests. msi_trigger, however, stays NULL
+ * until the guest enables MSI; so when msi_trigger is NULL, return
+ * success without injecting an interrupt into the guest.
+ */
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return;
+ if (vgpu->msi_trigger)
+ eventfd_signal(vgpu->msi_trigger);
}
static void propagate_event(struct intel_gvt_irq *irq,
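
The reads in inject_virtual_interrupt() above follow the standard PCI MSI capability layout for the 32-bit-address format: Message Control at capability offset +2, Message Address at +4, Message Data at +8, which is exactly what the MSI_CAP_* macros encode. The eventfd context behind vgpu->msi_trigger is expected to be registered by the VMM before interrupts can reach the guest; a minimal sketch of how that registration might look follows. The helper name is hypothetical and locking is omitted; only eventfd_ctx_fdget() and eventfd_ctx_put() are real kernel APIs:

/*
 * Hypothetical helper: install the eventfd the VMM passed in as the
 * vGPU's MSI trigger. The surrounding VFIO plumbing and any locking
 * are omitted; this only shows how msi_trigger could be populated.
 */
static int example_set_msi_trigger(struct intel_vgpu *vgpu, int fd)
{
        struct eventfd_ctx *trigger = eventfd_ctx_fdget(fd);

        if (IS_ERR(trigger))
                return PTR_ERR(trigger);

        if (vgpu->msi_trigger)
                eventfd_ctx_put(vgpu->msi_trigger);
        vgpu->msi_trigger = trigger;

        return 0;
}
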
@@ -535,7 +597,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_BSD2(gvt->dev_priv)) {
+ if (HAS_ENGINE(gvt->gt, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -567,7 +629,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
- if (IS_BROADWELL(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->gt->i915)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -580,7 +642,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+ } else if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -599,7 +661,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
}
-static struct intel_gvt_irq_ops gen8_irq_ops = {
+static const struct intel_gvt_irq_ops gen8_irq_ops = {
.init_irq = gen8_init_irq,
.check_pending_irq = gen8_check_pending_irq,
};
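
Making the ops table const lets it live in read-only data without changing how it is consumed; callers still dispatch through the pointer held in gvt->irq.ops. A small sketch of that dispatch, assuming the check_pending_irq hook takes only the vGPU (its signature is not shown in the hunks above):

/*
 * Sketch, not from this patch: dispatch through the const ops table.
 * The hook signature is an assumption.
 */
static void example_check_pending(struct intel_vgpu *vgpu)
{
        const struct intel_gvt_irq_ops *ops = vgpu->gvt->irq.ops;

        ops->check_pending_irq(vgpu);
}
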
@@ -617,13 +679,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
- struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ const struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
- WARN_ON(!handler);
+ drm_WARN_ON(&i915->drm, !handler);
handler(irq, event, vgpu);
@@ -641,38 +704,6 @@ static void init_events(
}
}
-static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
-{
- struct intel_gvt_vblank_timer *vblank_timer;
- struct intel_gvt_irq *irq;
- struct intel_gvt *gvt;
-
- vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
- irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
- gvt = container_of(irq, struct intel_gvt, irq);
-
- intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
- hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
- return HRTIMER_RESTART;
-}
-
-/**
- * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
- * @gvt: a GVT device
- *
- * This function is called at driver unloading stage, to clean up GVT-g IRQ
- * emulation subsystem.
- *
- */
-void intel_gvt_clean_irq(struct intel_gvt *gvt)
-{
- struct intel_gvt_irq *irq = &gvt->irq;
-
- hrtimer_cancel(&irq->vblank_timer.timer);
-}
-
-#define VBLNAK_TIMER_PERIOD 16000000
-
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
* @gvt: a GVT device
@@ -686,18 +717,11 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
int intel_gvt_init_irq(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
- struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- irq->ops = &gen8_irq_ops;
- irq->irq_map = gen8_irq_map;
- } else {
- WARN_ON(1);
- return -ENODEV;
- }
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
/* common event initialization */
init_events(irq);
@@ -707,9 +731,5 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
init_irq_map(irq);
- hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vblank_timer->timer.function = vblank_timer_fn;
- vblank_timer->period = VBLNAK_TIMER_PERIOD;
-
return 0;
}