Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c   |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c     |  57
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c    | 119
-rw-r--r--  drivers/gpu/drm/i915/gvt/debug.h         |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c       | 139
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h       |  20
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c          |  13
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c      |  29
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c           | 114
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h           |  14
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      | 474
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c         |  49
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c          | 104
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h          |   3
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c      |  15
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c        |  18
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     | 136
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c          |  72
20 files changed, 953 insertions, 445 deletions
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b6caaca9751..325618d969fe 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
const char *item;
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
- gvt_err("Invalid vGPU creation params\n");
+ gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;
no_enough_resource:
- gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
- gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
- vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+ gvt_vgpu_err("fail to allocate resource %s\n", item);
+ gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+ BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4a6a2ed65732..b7d7721e72fa 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -41,6 +41,54 @@ enum {
INTEL_GVT_PCI_BAR_MAX,
};
+/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
+ * byte) byte by byte in standard pci configuration space. (not the full
+ * 256 bytes.)
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+ [PCI_COMMAND] = 0xff, 0x07,
+ [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
+ [PCI_CACHE_LINE_SIZE] = 0xff,
+ [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+ [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
+ [PCI_INTERRUPT_LINE] = 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+ u8 *src, unsigned int bytes)
+{
+ u8 *cfg_base = vgpu_cfg_space(vgpu);
+ u8 mask, new, old;
+ int i = 0;
+
+ for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+ mask = pci_cfg_space_rw_bmp[off + i];
+ old = cfg_base[off + i];
+ new = src[i] & mask;
+
+ /**
+ * The PCI_STATUS high byte has RW1C bits, here
+ * emulates clear by writing 1 for these bits.
+ * Writing a 0b to RW1C bits has no effect.
+ */
+ if (off + i == PCI_STATUS + 1)
+ new = (~new & old) & mask;
+
+ cfg_base[off + i] = (old & ~mask) | new;
+ }
+
+ /* For other configuration space directly copy as it is. */
+ if (i < bytes)
+ memcpy(cfg_base + off + i, src + i, bytes - i);
+}
+
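A minimal user-space sketch of the RW1C update rule implemented above (mask value taken from the pci_cfg_space_rw_bmp table; standalone C, not GVT code):

    #include <assert.h>
    #include <stdint.h>

    /* For an RW1C byte, writing 1 clears a bit and writing 0 leaves it;
     * bits outside the mask are read-only and keep their old value.
     */
    static uint8_t rw1c_update(uint8_t old, uint8_t src, uint8_t mask)
    {
        uint8_t val = src & mask;

        val = (~val & old) & mask;      /* 1 clears, 0 keeps */
        return (old & ~mask) | val;
    }

    int main(void)
    {
        const uint8_t mask = 0xf9;      /* PCI_STATUS high byte */

        assert(rw1c_update(0x88, 0x08, mask) == 0x80); /* W1C clears bit 3 */
        assert(rw1c_update(0x88, 0x00, mask) == 0x88); /* writing 0: no-op */
        return 0;
    }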
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
int ret;
+ if (vgpu->failsafe)
+ return 0;
+
if (WARN_ON(bytes > 4))
return -EINVAL;
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (ret)
return ret;
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
default:
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
}
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9c8e2407682..2b92cc8a7d1a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
if (d_info == NULL)
return;
- gvt_err("opcode=0x%x %s sub_ops:",
+ gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
cmd >> (32 - d_info->op_len), d_info->name);
for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
s->ring_id, s->ring_start, s->ring_start + s->ring_size,
s->ring_head, s->ring_tail);
- gvt_err(" %s %s ip_gma(%08lx) ",
+ gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
"RING_BUFFER" : "BATCH_BUFFER",
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
if (s->ip_va == NULL) {
- gvt_err(" ip_va(NULL)");
+ gvt_dbg_cmd(" ip_va(NULL)");
return;
}
- gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
return ret;
}
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+ return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+ unsigned int data = cmd_val(s, index + 1);
+
+ if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+ gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+ offset, data);
+ return -EINVAL;
+ }
+ return 0;
+}
+
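For context on the index arithmetic above: the FORCE_NONPRIV registers at 0x24d0..0x24ff hold MMIO addresses to be made non-privileged, so the LRI value (the dword after the register offset, hence cmd_val(s, index + 1)) is itself a register offset that must be on the whitelist. A hypothetical payload, for illustration only (header encoding from memory, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical MI_LOAD_REGISTER_IMM payload: dword 0 is the header,
     * then (register offset, value) pairs. When the offset falls in
     * 0x24d0..0x24ff, the value must name a whitelisted register such
     * as 0x2580 (GEN8_CS_CHICKEN1 in the handlers.c list below).
     */
    int main(void)
    {
        uint32_t lri[] = { 0x11000001, 0x24d4, 0x2580 };
        unsigned int index = 1;

        printf("reg 0x%x <- value 0x%x\n", lri[index], lri[index + 1]);
        return 0;
    }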
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_gvt *gvt = vgpu->gvt;
if (offset + 4 > gvt->device_info.mmio_size) {
- gvt_err("%s access to (%x) outside of MMIO range\n",
+ gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
return -EINVAL;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
- gvt_err("vgpu%d: %s access to non-render register (%x)\n",
- s->vgpu->id, cmd, offset);
+ gvt_vgpu_err("%s access to non-render register (%x)\n",
+ cmd, offset);
return 0;
}
if (is_shadowed_mmio(offset)) {
- gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
- s->vgpu->id, offset);
+ gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
return 0;
}
+ if (is_force_nonpriv_mmio(offset) &&
+ force_nonpriv_reg_handler(s, offset, index))
+ return -EINVAL;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
else if (post_sync == 1) {
/* check ggtt*/
- if ((cmd_val(s, 2) & (1 << 2))) {
+ if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
gma = cmd_val(s, 2) & GENMASK(31, 3);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
break;
default:
- gvt_err("unknown plane code %d\n", plane);
+ gvt_vgpu_err("unknown plane code %d\n", plane);
return -EINVAL;
}
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
struct mi_display_flip_command_info info;
+ struct intel_vgpu *vgpu = s->vgpu;
int ret;
int i;
int len = cmd_length(s);
ret = decode_mi_display_flip(s, &info);
if (ret) {
- gvt_err("fail to decode MI display flip command\n");
+ gvt_vgpu_err("fail to decode MI display flip command\n");
return ret;
}
ret = check_mi_display_flip(s, &info);
if (ret) {
- gvt_err("invalid MI display flip command\n");
+ gvt_vgpu_err("invalid MI display flip command\n");
return ret;
}
ret = update_plane_mmio_from_mi_display_flip(s, &info);
if (ret) {
- gvt_err("fail to update plane mmio\n");
+ gvt_vgpu_err("fail to update plane mmio\n");
return ret;
}
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
int ret;
if (op_size > max_surface_size) {
- gvt_err("command address audit fail name %s\n", s->info->name);
+ gvt_vgpu_err("command address audit fail name %s\n",
+ s->info->name);
return -EINVAL;
}
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
return 0;
err:
- gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+ gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
static inline int unexpected_cmd(struct parser_exec_state *s)
{
- gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
- s->vgpu->id, s->info->name);
+ struct intel_vgpu *vgpu = s->vgpu;
+
+ gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
return -EINVAL;
}
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
while (gma != end_gma) {
gpa = intel_vgpu_gma_to_gpa(mm, gma);
if (gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("invalid gma address: %lx\n", gma);
+ gvt_vgpu_err("invalid gma address: %lx\n", gma);
return -EFAULT;
}
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
uint32_t bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
+ struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
/* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
gma, gma + 4, &cmd);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_shadow_bb_entry *entry_obj;
+ struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
uint32_t bb_size;
void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) {
- gvt_err("failed to set shadow batch to CPU\n");
+ gvt_vgpu_err("failed to set shadow batch to CPU\n");
goto unmap_src;
}
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
gma, gma + bb_size,
dst);
if (ret) {
- gvt_err("fail to copy guest ring buffer\n");
+ gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
bool second_level;
int ret = 0;
+ struct intel_vgpu *vgpu = s->vgpu;
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
- gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+ gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
- gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+ gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL;
}
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (batch_buffer_needs_scan(s)) {
ret = perform_bb_shadow(s);
if (ret < 0)
- gvt_err("invalid shadow batch buffer\n");
+ gvt_vgpu_err("invalid shadow batch buffer\n");
} else {
/* emulate a batch buffer end to do return right */
ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
int ret = 0;
cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom;
+ struct intel_vgpu *vgpu = s->vgpu;
t0 = get_cycles();
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
- gvt_err("%s handler error\n", info->name);
+ gvt_vgpu_err("%s handler error\n", info->name);
return ret;
}
}
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s);
if (ret) {
- gvt_err("%s IP advance error\n", info->name);
+ gvt_vgpu_err("%s IP advance error\n", info->name);
return ret;
}
}
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
unsigned long gma_head, gma_tail, gma_bottom;
int ret = 0;
+ struct intel_vgpu *vgpu = s->vgpu;
gma_head = rb_start + rb_head;
gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
- gvt_err("ip_gma %lx out of ring scope."
+ gvt_vgpu_err("ip_gma %lx out of ring scope."
"(base:0x%lx, bottom: 0x%lx)\n",
s->ip_gma, rb_start,
gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
return -EINVAL;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
- gvt_err("ip_gma %lx out of range."
+ gvt_vgpu_err("ip_gma %lx out of range."
"base 0x%lx head 0x%lx tail 0x%lx\n",
s->ip_gma, rb_start,
rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
}
ret = cmd_parser_exec(s);
if (ret) {
- gvt_err("cmd parser error\n");
+ gvt_vgpu_err("cmd parser error\n");
parser_exec_state_dump(s);
break;
}
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
- gvt_err("fail to copy guest ring buffer\n");
+ gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
- gvt_err("fail to copy guest ring buffer\n");
+ gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ret;
+ struct intel_vgpu *vgpu = workload->vgpu;
ret = shadow_workload_ring_buffer(workload);
if (ret) {
- gvt_err("fail to shadow workload ring_buffer\n");
+ gvt_vgpu_err("fail to shadow workload ring_buffer\n");
return ret;
}
ret = scan_workload(workload);
if (ret) {
- gvt_err("scan workload error\n");
+ gvt_vgpu_err("scan workload error\n");
return ret;
}
return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+ struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
/* get the va of the shadow batch buffer */
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(map)) {
- gvt_err("failed to vmap shadow indirect ctx\n");
+ gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
ret = PTR_ERR(map);
goto put_obj;
}
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret) {
- gvt_err("failed to set shadow indirect ctx to CPU\n");
+ gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
}
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
guest_gma, guest_gma + ctx_size,
map);
if (ret) {
- gvt_err("fail to copy guest indirect ctx\n");
+ gvt_vgpu_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
+ struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
ret = shadow_indirect_ctx(wa_ctx);
if (ret) {
- gvt_err("fail to shadow indirect ctx\n");
+ gvt_vgpu_err("fail to shadow indirect ctx\n");
return ret;
}
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
ret = scan_wa_ctx(wa_ctx);
if (ret) {
- gvt_err("scan wa ctx error\n");
+ gvt_vgpu_err("scan wa ctx error\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 68cba7bd980a..b0cff4dc2684 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -27,6 +27,14 @@
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
+#define gvt_vgpu_err(fmt, args...) \
+do { \
+ if (IS_ERR_OR_NULL(vgpu)) \
+ DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+ else \
+ DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+} while (0)
+
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8fde880c39..5419ae6ec633 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+ {
+/* EDID with 1024x768 as its resolution */
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0xef,
+ },
+ {
/* EDID with 1920x1200 as its resolution */
-static unsigned char virtual_dp_monitor_edid[] = {
- /*Header*/
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- /* Vendor & Product Identification */
- 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
- /* Version & Revision */
- 0x01, 0x04,
- /* Basic Display Parameters & Features */
- 0xa5, 0x34, 0x20, 0x78, 0x23,
- /* Color Characteristics */
- 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
- /* Established Timings: maximum resolution is 1024x768 */
- 0x21, 0x08, 0x00,
- /*
- * Standard Timings.
- * below new resolutions can be supported:
- * 1920x1080, 1280x720, 1280x960, 1280x1024,
- * 1440x900, 1600x1200, 1680x1050
- */
- 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
- 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
- /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
- 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
- 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
- /* 18 Byte Data Blocks 2: invalid */
- 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- /* 18 Byte Data Blocks 3: invalid */
- 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
- 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
- /* 18 Byte Data Blocks 4: invalid */
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
- 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
- /* Extension Block Count */
- 0x00,
- /* Checksum */
- 0x45,
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /*
+ * Standard Timings.
+ * below new resolutions can be supported:
+ * 1920x1080, 1280x720, 1280x960, 1280x1024,
+ * 1440x900, 1600x1200, 1680x1050
+ */
+ 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+ 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+ /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+ 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0x45,
+ },
};
#define DPCD_HEADER_SIZE 0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+ }
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
}
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
}
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
- int type)
+ int type, unsigned int resolution)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ if (WARN_ON(resolution >= GVT_EDID_NUM))
+ return -EINVAL;
+
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
if (!port->edid)
return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
return -ENOMEM;
}
- memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
EDID_SIZE);
port->edid->data_valid = true;
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
* Zero on success, negative error code if failed.
*
*/
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv))
- return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+ resolution);
else
- return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+ resolution);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8b234ea961f6..d73de22102e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
int type;
};
+enum intel_vgpu_edid {
+ GVT_EDID_1024_768,
+ GVT_EDID_1920_1200,
+ GVT_EDID_NUM,
+};
+
+static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return "1024x768";
+ case GVT_EDID_1920_1200:
+ return "1920x1200";
+ default:
+ return "";
+ }
+}
+
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index bda85dff7b2a..f1648fe5e5ea 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
- gvt_err("Driver tries to read EDID without proper sequence!\n");
+ gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
if (edid->current_edid_read >= EDID_SIZE) {
- gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+ gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
return 0;
}
if (!edid->edid_available) {
- gvt_err("Reading EDID but EDID is not available!\n");
+ gvt_vgpu_err("Reading EDID but EDID is not available!\n");
return 0;
}
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++;
} else {
- gvt_err("No EDID available during the reading?\n");
+ gvt_vgpu_err("No EDID available during the reading?\n");
}
return chr;
}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
- gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+ gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
break;
}
/*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
*/
} else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
- gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
- vgpu->id);
+ gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
}
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 46eb9fd3c03f..f1f426a97aa9 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx)
{
+ struct intel_vgpu *vgpu = execlist->vgpu;
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
- gvt_err("schedule out context is not running context,"
+ gvt_vgpu_err("schedule out context is not running context,"
"ctx id %x running ctx id %x\n",
ctx->context_id,
execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (status.execlist_queue_full) {
- gvt_err("virtual execlist slots are full\n");
+ gvt_vgpu_err("virtual execlist slots are full\n");
return NULL;
}
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
struct execlist_context_status_format status;
+ struct intel_vgpu *vgpu = execlist->vgpu;
gvt_dbg_el("emulate schedule-in\n");
if (!slot) {
- gvt_err("no available execlist slot\n");
+ gvt_vgpu_err("no available execlist slot\n");
return -EINVAL;
}
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
- gvt_err("Cannot pin\n");
return;
}
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
- gvt_err("Cannot pin indirect ctx obj\n");
return;
}
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
+ struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
- gvt_err("Advanced Context mode(SVM) is not supported!\n");
+ gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
- gvt_err("fail to create mm object.\n");
+ gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+ gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
continue;
if (!desc[i]->privilege_access) {
- gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
- vgpu->id);
+ gvt_vgpu_err("unexpected GGTT elsp submission\n");
return -EINVAL;
}
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
}
if (!valid_desc_bitmap) {
- gvt_err("vgpu%d: no valid desc in a elsp submission\n",
- vgpu->id);
+ gvt_vgpu_err("no valid desc in a elsp submission\n");
return -EINVAL;
}
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) {
- gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
- vgpu->id);
+ gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
return -EINVAL;
}
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in);
if (ret) {
- gvt_err("vgpu%d: fail to schedule workload\n",
- vgpu->id);
+ gvt_vgpu_err("fail to schedule workload\n");
return ret;
}
emulate_schedule_in = false;
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1cb29b2d7dc6..933a7c211a1c 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
- firmware = vmalloc(size);
+ firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 28c92346db0e..da7312715824 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
- gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
- vgpu->id, addr, size);
+ gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+ addr, size);
return false;
}
return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+ gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
return -ENXIO;
}
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, daddr)) {
- gvt_err("fail to map dma addr\n");
+ gvt_vgpu_err("fail to map dma addr\n");
return -EINVAL;
}
@@ -735,7 +735,7 @@ retry:
if (reclaim_one_mm(vgpu->gvt))
goto retry;
- gvt_err("fail to allocate ppgtt shadow page\n");
+ gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
return ERR_PTR(-ENOMEM);
}
@@ -750,14 +750,14 @@ retry:
*/
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
if (ret) {
- gvt_err("fail to initialize shadow page for spt\n");
+ gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err;
}
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
- gvt_err("fail to initialize guest page for spt\n");
+ gvt_vgpu_err("fail to initialize guest page for spt\n");
goto err;
}
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
if (p)
return shadow_page_to_ppgtt_spt(p);
- gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
- vgpu->id, mfn);
+ gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
return NULL;
}
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
}
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) {
- gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
- vgpu->id, ops->get_pfn(e));
+ gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+ ops->get_pfn(e));
return -ENXIO;
}
return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
+ struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_shadow_entry(spt, &e, index) {
if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
- gvt_err("GVT doesn't support pse bit for now\n");
+ gvt_vgpu_err("GVT doesn't support pse bit for now\n");
return -EINVAL;
}
ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
ppgtt_free_shadow_page(spt);
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
- spt->vgpu->id, spt, e.val64, e.type);
+ gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+ spt, e.val64, e.type);
return ret;
}
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
}
return s;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, s, we->val64, we->type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ s, we->val64, we->type);
return ERR_PTR(ret);
}
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_guest_entry(spt, &ge, i) {
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
- gvt_err("GVT doesn't support pse bit now\n");
+ gvt_vgpu_err("GVT doesn't support pse bit now\n");
ret = -EINVAL;
goto fail;
}
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
}
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, spt, ge.val64, ge.type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ spt, ge.val64, ge.type);
return ret;
}
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_vgpu_ppgtt_spt *s =
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
if (!s) {
- gvt_err("fail to find guest page\n");
+ gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, spt, e.val64, e.type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ spt, e.val64, e.type);
return ret;
}
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
}
return 0;
fail:
- gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
- spt, we->val64, we->type);
+ gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
+ spt, we->val64, we->type);
return ret;
}
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
}
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
- vgpu->id, spt, we->val64, we->type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+ spt, we->val64, we->type);
return ret;
}
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) {
- gvt_err("fail to populate guest root pointer\n");
+ gvt_vgpu_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt);
goto fail;
}
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
ret = gtt->mm_alloc_page_table(mm);
if (ret) {
- gvt_err("fail to allocate page table for mm\n");
+ gvt_vgpu_err("fail to allocate page table for mm\n");
goto fail;
}
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
}
return mm;
fail:
- gvt_err("fail to create mm\n");
+ gvt_vgpu_err("fail to create mm\n");
if (mm)
intel_gvt_mm_unreference(mm);
return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
mm->page_table_level, gma, gpa);
return gpa;
err:
- gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+ gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR;
}
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gma = g_gtt_index << GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
- if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
- "vgpu%d: found oob ggtt write, offset %x\n",
- vgpu->id, off)) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- }
ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
@@ -1839,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (ops->test_present(&e)) {
ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) {
- gvt_err("vgpu%d: fail to translate guest gtt entry\n",
- vgpu->id);
+ gvt_vgpu_err("fail to translate guest gtt entry\n");
return ret;
}
} else {
@@ -1896,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
- gvt_err("fail to allocate scratch page\n");
+ gvt_vgpu_err("fail to allocate scratch page\n");
return -ENOMEM;
}
daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
- gvt_err("fail to dmamap scratch_pt\n");
+ gvt_vgpu_err("fail to dmamap scratch_pt\n");
__free_page(virt_to_page(scratch_pt));
return -ENOMEM;
}
@@ -2006,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
- gvt_err("fail to create mm for ggtt.\n");
+ gvt_vgpu_err("fail to create mm for ggtt.\n");
return PTR_ERR(ggtt_mm);
}
@@ -2015,6 +2011,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return create_scratch_page_tree(vgpu);
}
+static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type == type) {
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+ }
+}
+
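A user-space sketch of the delete-while-iterating pattern intel_vgpu_free_mm() relies on; the kernel's list_for_each_safe keeps a saved next pointer, and this standalone version gets the same safety with a pointer-to-pointer:

    #include <stdlib.h>

    struct mm_node { int type; struct mm_node *next; };

    static void free_mm_by_type(struct mm_node **head, int type)
    {
        struct mm_node **pp = head;

        while (*pp) {
            struct mm_node *n = *pp;

            if (n->type == type) {
                *pp = n->next;          /* unlink before freeing */
                free(n);
            } else {
                pp = &n->next;
            }
        }
    }

    int main(void)
    {
        struct mm_node *b = calloc(1, sizeof(*b));
        struct mm_node *a = calloc(1, sizeof(*a));

        a->type = 1;
        a->next = b;                    /* list: a(type 1) -> b(type 0) */
        free_mm_by_type(&a, 0);         /* frees b, keeps a */
        free(a);
        return 0;
    }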
/**
* intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization
* @vgpu: a vGPU
@@ -2027,19 +2039,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
- struct list_head *pos, *n;
- struct intel_vgpu_mm *mm;
-
ppgtt_free_all_shadow_page(vgpu);
release_scratch_page_tree(vgpu);
- list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- vgpu->gvt->gtt.mm_free_page_table(mm);
- list_del(&mm->list);
- list_del(&mm->lru_list);
- kfree(mm);
- }
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2071,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
for (i = 0; i < preallocated_oos_pages; i++) {
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
if (!oos_page) {
- gvt_err("fail to pre-allocate oos page\n");
ret = -ENOMEM;
goto fail;
}
@@ -2161,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
- gvt_err("fail to create mm\n");
+ gvt_vgpu_err("fail to create mm\n");
return PTR_ERR(mm);
}
}
@@ -2191,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
if (!mm) {
- gvt_err("fail to find ppgtt instance.\n");
+ gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
intel_gvt_mm_unreference(mm);
@@ -2322,6 +2325,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
int i;
ppgtt_free_all_shadow_page(vgpu);
+
+ /* Shadow pages are only created when there is no page
+ * table tracking data, so remove page tracking data after
+ * removing the shadow pages.
+ */
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+
if (!dmlr)
return;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index e227caf5859e..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -143,6 +143,8 @@ struct intel_vgpu {
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
+ bool pv_notified;
+ bool failsafe;
bool resetting;
void *sched_data;
@@ -160,7 +162,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
- struct notifier_block shadow_ctx_notifier_block;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -203,18 +204,18 @@ struct intel_gvt_firmware {
};
struct intel_gvt_opregion {
- void __iomem *opregion_va;
+ void *opregion_va;
u32 opregion_pa;
};
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
- unsigned int max_instance;
unsigned int avail_instance;
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
+ enum intel_vgpu_edid resolution;
};
struct intel_gvt {
@@ -231,6 +232,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
+ struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
+ __u64 resolution;
__s32 primary;
__u64 vgpu_id;
};
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
};
+enum {
+ GVT_FAILSAFE_UNSUPPORTED_GUEST,
+ GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+};
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1d450627ff65..eaff45d417e8 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
info->size = size;
info->length = (i + 4) < end ? 4 : (end - i);
info->addr_mask = addr_mask;
+ info->ro_mask = ro_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,42 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
#define fence_num_to_offset(num) \
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
+
+static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+{
+ switch (reason) {
+ case GVT_FAILSAFE_UNSUPPORTED_GUEST:
+ pr_err("Detected your guest driver doesn't support GVT-g.\n");
+ break;
+ case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
+ pr_err("Graphics resource is not enough for the guest\n");
+ default:
+ break;
+ }
+ pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
+ vgpu->failsafe = true;
+}
+
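A user-space sketch of the policy this function anchors (assumed flow, condensed from the hunks in this file and cfg_space.c): once a guest trips failsafe, emulated accesses become quiet no-ops instead of per-access error spam.

    #include <stdbool.h>
    #include <stdio.h>

    struct vgpu_mock { int id; bool pv_notified; bool failsafe; };

    /* A GVT-aware guest reads the PVINFO page early (pv_notified).
     * One that touches GVT-dependent state without doing so is
     * treated as unsupported and dropped into failsafe mode.
     */
    static void mock_access(struct vgpu_mock *v, bool needs_pv)
    {
        if (v->failsafe)
            return;                     /* silently ignore */
        if (needs_pv && !v->pv_notified) {
            fprintf(stderr, "vgpu %d enters failsafe mode\n", v->id);
            v->failsafe = true;
            return;
        }
        /* ... normal emulation path ... */
    }

    int main(void)
    {
        struct vgpu_mock v = { .id = 1, .pv_notified = false };

        mock_access(&v, true);          /* trips failsafe once */
        mock_access(&v, true);          /* no further warnings */
        return 0;
    }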
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
if (fence_num >= vgpu_fence_sz(vgpu)) {
- gvt_err("vgpu%d: found oob fence register access\n",
- vgpu->id);
- gvt_err("vgpu%d: total fence num %d access fence num %d\n",
- vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+
+ /* When guest access oob fence regs without access
+ * pv_info first, we treat guest not supporting GVT,
+ * and we will let vgpu enter failsafe mode.
+ */
+ if (!vgpu->pv_notified)
+ enter_failsafe_mode(vgpu,
+ GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ if (!vgpu->mmio.disable_warn_untrack) {
+ gvt_vgpu_err("found oob fence register access\n");
+ gvt_vgpu_err("total fence %d, access fence %d\n",
+ vgpu_fence_sz(vgpu), fence_num);
+ }
memset(p_data, 0, bytes);
+ return -EINVAL;
}
return 0;
}
@@ -219,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
break;
default:
/*should not hit here*/
- gvt_err("invalid forcewake offset 0x%x\n", offset);
+ gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
return -EINVAL;
}
} else {
@@ -369,6 +397,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+/* ascendingly sorted */
+static i915_reg_t force_nonpriv_white_list[] = {
+ GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
+ GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+ GEN8_CS_CHICKEN1,//_MMIO(0x2580)
+ _MMIO(0x2690),
+ _MMIO(0x2694),
+ _MMIO(0x2698),
+ _MMIO(0x4de0),
+ _MMIO(0x4de4),
+ _MMIO(0x4dfc),
+ GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
+ _MMIO(0x7014),
+ HDC_CHICKEN0,//_MMIO(0x7300)
+ GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
+ _MMIO(0x7700),
+ _MMIO(0x7704),
+ _MMIO(0x7708),
+ _MMIO(0x770c),
+ _MMIO(0xb110),
+ GEN8_L3SQCREG4,//_MMIO(0xb118)
+ _MMIO(0xe100),
+ _MMIO(0xe18c),
+ _MMIO(0xe48c),
+ _MMIO(0xe5f4),
+};
+
+/* a simple bsearch */
+static inline bool in_whitelist(unsigned int reg)
+{
+ int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
+ i915_reg_t *array = force_nonpriv_white_list;
+
+ while (left < right) {
+ int mid = (left + right)/2;
+
+ if (reg > array[mid].reg)
+ left = mid + 1;
+ else if (reg < array[mid].reg)
+ right = mid;
+ else
+ return true;
+ }
+ return false;
+}
+
+static int force_nonpriv_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_nonpriv = *(u32 *)p_data;
+ int ret = -EINVAL;
+
+ if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return ret;
+ }
+
+ if (in_whitelist(reg_nonpriv)) {
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ } else {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
+ vgpu->id, reg_nonpriv);
+ }
+ return ret;
+}
+
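The search above is a textbook half-open binary search over the ascendingly sorted table; a standalone check of the same left/right arithmetic:

    #include <assert.h>

    static int in_table(unsigned int reg, const unsigned int *t, int n)
    {
        int left = 0, right = n;        /* [left, right) window */

        while (left < right) {
            int mid = (left + right) / 2;

            if (reg > t[mid])
                left = mid + 1;
            else if (reg < t[mid])
                right = mid;
            else
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* first few offsets from force_nonpriv_white_list */
        const unsigned int wl[] = { 0x20ec, 0x2248, 0x2580, 0x7010 };

        assert(in_table(0x2580, wl, 4));
        assert(!in_table(0x2584, wl, 4));
        return 0;
    }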
static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -432,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
} else {
- gvt_err("Invalid train pattern %d\n", train_pattern);
+ gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
return -EINVAL;
}
@@ -490,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupport registers %x\n", offset);
return -EINVAL;
}
@@ -720,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
u32 data;
if (!dpy_is_valid_port(port_index)) {
- gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+ gvt_vgpu_err("Unsupported DP port access!\n");
return 0;
}
@@ -918,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
if (i == num) {
if (num == SBI_REG_MAX) {
- gvt_err("vgpu%d: SBI caching meets maximum limits\n",
- vgpu->id);
+ gvt_vgpu_err("SBI caching meets maximum limits\n");
return;
}
display->sbi.number++;
@@ -999,8 +1094,9 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
break;
}
if (invalid_read)
- gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+ gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data);
+ vgpu->pv_notified = true;
return 0;
}
@@ -1026,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
case 1: /* Remove this in guest driver. */
break;
default:
- gvt_err("Invalid PV notification %d\n", notification);
+ gvt_vgpu_err("Invalid PV notification %d\n", notification);
}
return ret;
}
@@ -1039,7 +1135,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
char vmid_str[20];
char display_ready_str[20];
- snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
env[0] = display_ready_str;
snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,8 +1174,11 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
case _vgtif_reg(execlist_context_descriptor_lo):
case _vgtif_reg(execlist_context_descriptor_hi):
break;
+ case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
+ break;
default:
- gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+ gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
@@ -1203,26 +1302,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
- case 0x6:
- /**
- * "Read memory latency" command on gen9.
- * Below memory latency values are read
- * from skylake platform.
- */
- if (!*data0)
- *data0 = 0x1e1a1100;
- else
- *data0 = 0x61514b3d;
+ case GEN9_PCODE_READ_MEM_LATENCY:
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ /**
+ * "Read memory latency" command on gen9.
+ * Below memory latency values are read
+ * from skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ }
+ break;
+ case SKL_PCODE_CDCLK_CONTROL:
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+ *data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
- case 0x5:
+ case GEN6_PCODE_READ_RC6VIDS:
*data0 |= 0x1;
break;
}
gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
vgpu->id, value, *data0);
-
- value &= ~(1 << 31);
+ /**
+ * PCODE_READY clear means ready for pcode read/write,
+ * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
+ * always emulate as pcode read/write success and ready for access
+ * anytime, since we don't touch real physical registers here.
+ */
+ value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
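A standalone illustration of the completion emulation above; bit positions shown here follow the i915 definitions as commonly laid out (READY in bit 31, the low byte carrying the command on request and the error code on reply), so clearing both reads back as "done, no error":

    #include <assert.h>
    #include <stdint.h>

    #define PCODE_READY      (1u << 31)
    #define PCODE_ERROR_MASK 0xffu

    int main(void)
    {
        /* guest starts a request: READY set, command in low byte */
        uint32_t value = PCODE_READY | 0x06;

        /* emulate instant, error-free completion */
        value &= ~(PCODE_READY | PCODE_ERROR_MASK);
        assert(value == 0);
        return 0;
    }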
@@ -1302,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
- gvt_err("fail submit workload on ring %d\n", ring_id);
+ gvt_vgpu_err("fail submit workload on ring %d\n",
+ ring_id);
}
++execlist->elsp_dwords.index;
@@ -1318,6 +1429,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes);
+
+ /* when PPGTT mode enabled, we will check if guest has called
+ * pvinfo, if not, we will treat this guest as non-gvtg-aware
+ * guest, and stop emulating its cfg space, mmio, gtt, etc.
+ */
+ if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
+ (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
+ && !vgpu->pv_notified) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
@@ -1400,6 +1522,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_GM(reg, d, r, w) \
MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+#define MMIO_GM_RDR(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
#define MMIO_RO(reg, d, f, rm, r, w) \
MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
@@ -1419,6 +1544,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_RING_GM(prefix, d, r, w) \
MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
@@ -1427,73 +1555,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+ intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
- MMIO_RING_D(RING_HWSTAM, D_ALL);
+ MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
#define RING_REG(base) (base + 0x28)
- MMIO_RING_D(RING_REG, D_ALL);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x134)
- MMIO_RING_D(RING_REG, D_ALL);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
- MMIO_GM(0x2148, D_ALL, NULL, NULL);
- MMIO_GM(CCID, D_ALL, NULL, NULL);
- MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
- MMIO_RING_D(RING_TAIL, D_ALL);
- MMIO_RING_D(RING_HEAD, D_ALL);
- MMIO_RING_D(RING_CTL, D_ALL);
- MMIO_RING_D(RING_ACTHD, D_ALL);
- MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+ MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
#define RING_REG(base) (base + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ ring_mode_mmio_write);
#undef RING_REG
- MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
- MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_D(GAM_ECOCHK, D_ALL);
- MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x9030, D_ALL);
- MMIO_D(0x20a0, D_ALL);
- MMIO_D(0x2420, D_ALL);
- MMIO_D(0x2430, D_ALL);
- MMIO_D(0x2434, D_ALL);
- MMIO_D(0x2438, D_ALL);
- MMIO_D(0x243c, D_ALL);
- MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2158,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(FORCEWAKE_ACK, D_ALL);
MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
- MMIO_D(GTFIFODBG, D_ALL);
- MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
@@ -2080,7 +2216,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(0x13812c, D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2159,36 +2295,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x1a054, D_ALL);
MMIO_D(0x44070, D_ALL);
-
- MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
- MMIO_D(GEN7_OACONTROL, D_HSW);
+ MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
- MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(BCS_SWCTRL, D_ALL);
-
- MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2331,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+ MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2204,7 +2350,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2415,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(0x1c134, D_BDW_PLUS);
-
- MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
- MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
- MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
- NULL, NULL);
- MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
- NULL, NULL);
+ MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
- MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2456,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) (base + 0x234)
- MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+ ~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
- MMIO_RING_D(RING_REG, D_BDW_PLUS);
- MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
@@ -2331,6 +2487,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
MMIO_D(0x1c054, D_BDW_PLUS);
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
@@ -2341,14 +2499,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
- MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2520,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
- MMIO_D(0xfdc, D_BDW);
- MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
- MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+ MMIO_D(0xfdc, D_BDW_PLUS);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb1f0, D_BDW);
- MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb100, D_BDW);
- MMIO_D(0xb10c, D_BDW);
+ MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0xb110, D_BDW);
- MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+ NULL, force_nonpriv_write);
+
+ MMIO_D(0x22040, D_BDW_PLUS);
+ MMIO_D(0x44484, D_BDW_PLUS);
+ MMIO_D(0x4448c, D_BDW_PLUS);
- MMIO_D(0x83a4, D_BDW);
+ MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
- MMIO_D(0x8430, D_BDW);
+ MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x110000, D_BDW_PLUS);
@@ -2394,10 +2556,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
-
- MMIO_D(0x2248, D_BDW);
-
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2420,7 +2591,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
- MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2748,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0xd08, D_SKL);
- MMIO_D(0x20e0, D_SKL);
- MMIO_D(0x20ec, D_SKL);
+ MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_D(0x4de0, D_SKL);
- MMIO_D(0x4de4, D_SKL);
- MMIO_D(0x4de8, D_SKL);
- MMIO_D(0x4dec, D_SKL);
- MMIO_D(0x4df0, D_SKL);
- MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2781,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL);
MMIO_D(0x320f0, D_SKL);
- MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL);
MMIO_D(0x71034, D_SKL);
MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2794,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
MMIO_D(0x44500, D_SKL);
+ MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
return 0;
}
@@ -2813,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
return 0;
}
+
+/**
+ * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
+ * force-nopriv register
+ *
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the register is in force-nonpriv whitelist;
+ * False if outside;
+ */
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return in_whitelist(offset);
+}
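
A sketch of how the force_nonpriv_write handler registered earlier for the 0x24d0 range might consult this whitelist; its actual body falls outside these hunks, so treat the details below as assumptions:

	static int force_nonpriv_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
	{
		u32 reg_nonpriv = *(u32 *)p_data;

		if (bytes != 4 || !IS_ALIGNED(offset, bytes)) {
			gvt_vgpu_err("Invalid forcenonpriv write %x(%dB)\n",
				offset, bytes);
			return -EINVAL;
		}

		if (!intel_gvt_in_force_nonpriv_whitelist(vgpu->gvt,
				reg_nonpriv)) {
			gvt_vgpu_err("Invalid forcenonpriv register 0x%x\n",
				reg_nonpriv);
			return -EINVAL;
		}

		return intel_vgpu_default_mmio_write(vgpu, offset,
				p_data, bytes);
	}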
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0f7f5d97f582..1ea3eb270de8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
- page = pfn_to_page(pfn);
- if (is_error_page(page))
+ if (unlikely(!pfn_valid(pfn)))
return -EFAULT;
+ page = pfn_to_page(pfn);
daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr))
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence);
+ "fence: %d\nresolution: %s\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution));
}
static MDEV_TYPE_ATTR_RO(available_instances);
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
- struct intel_vgpu *vgpu;
+ struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) {
- gvt_err("failed to find type %s to create\n",
+ gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj));
ret = -EINVAL;
goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
- gvt_err("failed to create intel vgpu: %d\n", ret);
+ gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
goto out;
}
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vgpu->vdev.iommu_notifier);
if (ret != 0) {
- gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+ gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
+ ret);
goto out;
}
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vgpu->vdev.group_notifier);
if (ret != 0) {
- gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+ gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
+ ret);
goto undo_iommu;
}
@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
if (index >= VFIO_PCI_NUM_REGIONS) {
- gvt_err("invalid index: %u\n", index);
+ gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX:
default:
- gvt_err("unsupported region: %u\n", index);
+ gvt_vgpu_err("unsupported region: %u\n", index);
}
return ret == 0 ? count : ret;
@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) {
- gvt_err("eventfd_ctx_fdget failed\n");
+ gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size);
if (ret) {
- gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+ gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
return -EINVAL;
}
if (data_size) {
@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvm = vgpu->vdev.kvm;
if (!kvm || kvm->mm != current->mm) {
- gvt_err("KVM is required to use Intel vGPU\n");
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
}
@@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
+	/* info may be NULL here, so it must not be dereferenced (and
+	 * gvt_vgpu_err, which needs info->vgpu, cannot be used) before
+	 * the check below.
+	 */
	if (!info) {
		gvt_err("kvmgt_guest_info invalid\n");
		return false;
	}
@@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
unsigned long iova, pfn;
struct kvmgt_guest_info *info;
struct device *dev;
+ struct intel_vgpu *vgpu;
int rc;
if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
iova = gvt_cache_find(info->vgpu, gfn);
if (iova != INTEL_GVT_INVALID_ADDR)
return iova;
@@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
dev = mdev_dev(info->vgpu->vdev.mdev);
rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
- gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+ gfn, rc);
return INTEL_GVT_INVALID_ADDR;
}
/* transfer to host iova for GFX to use DMA */
rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
if (rc) {
- gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+ gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
vfio_unpin_pages(dev, &gfn, 1);
return INTEL_GVT_INVALID_ADDR;
}
@@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
- int ret;
+ int idx, ret;
bool kthread = current->mm == NULL;
if (!handle_valid(handle))
@@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
if (kthread)
use_mm(kvm->mm);
+ idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len);
+ srcu_read_unlock(&kvm->srcu, idx);
if (kthread)
unuse_mm(kvm->mm);
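
The SRCU pair added above is required because the memslot arrays that kvm_read_guest()/kvm_write_guest() walk are SRCU-protected, and this path can run from a kthread rather than a vCPU context. The same pattern, lifted into a self-contained helper (a sketch, not part of the patch):

	static int kvmgt_rw_gpa_locked(struct kvm *kvm, unsigned long gpa,
			void *buf, unsigned long len, bool write)
	{
		int idx, ret;

		idx = srcu_read_lock(&kvm->srcu);
		ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
			      kvm_read_guest(kvm, gpa, buf, len);
		srcu_read_unlock(&kvm->srcu, idx);

		return ret;
	}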
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4df078bc5d04..1ba3bdb09341 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes, bool read)
+{
+ struct intel_gvt *gvt = NULL;
+ void *pt = NULL;
+ unsigned int offset = 0;
+
+ if (!vgpu || !p_data)
+ return;
+
+ gvt = vgpu->gvt;
+ mutex_lock(&gvt->lock);
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+ if (reg_is_mmio(gvt, offset)) {
+ if (read)
+ intel_vgpu_default_mmio_read(vgpu, offset, p_data,
+ bytes);
+ else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ } else if (reg_is_gtt(gvt, offset) &&
+ vgpu->gtt.ggtt_mm->virtual_page_table) {
+ offset -= gvt->device_info.gtt_start_offset;
+ pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+ if (read)
+ memcpy(p_data, pt, bytes);
+ else
+ memcpy(pt, p_data, bytes);
+
+ } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+		/* Since we enter failsafe mode early during guest boot, the
+		 * guest may not have had a chance to set up its PPGTT table,
+		 * so there should not be any write-protected pages for it.
+		 * Keep the wp-related code here in case we need to handle
+		 * it in the future.
+		 */
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+			/* remove write protection to prevent future traps */
+ intel_vgpu_clean_guest_page(vgpu, gp);
+ if (read)
+ intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ else
+ intel_gvt_hypervisor_write_gpa(vgpu, pa,
+ p_data, bytes);
+ }
+ }
+ mutex_unlock(&gvt->lock);
+}
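
For reference, the routing above distinguishes three cases by guest physical address: trapped MMIO, the virtual GGTT range, and write-protected guest pages. reg_is_mmio() is defined outside this hunk; modeled on the reg_is_gtt() macro visible above, it is assumed to reduce to a simple range check:

	#define reg_is_mmio(gvt, reg) \
		((reg) < (gvt)->device_info.mmio_size)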
+
/**
* intel_vgpu_emulate_mmio_read - emulate MMIO read
* @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
unsigned int offset = 0;
int ret = -EINVAL;
+
+ if (vgpu->failsafe) {
+ failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
+ return 0;
+ }
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -85,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes);
if (ret) {
- gvt_err("vgpu%d: guest page read error %d, "
+ gvt_vgpu_err("guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- vgpu->id, ret,
- gp->gfn, pa, *(u32 *)p_data, bytes);
+ ret, gp->gfn, pa, *(u32 *)p_data,
+ bytes);
}
mutex_unlock(&gvt->lock);
return ret;
@@ -143,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (!vgpu->mmio.disable_warn_untrack) {
- gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
- vgpu->id, offset, bytes, *(u32 *)p_data);
+ gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
+ offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) {
- gvt_err("------------------------------------------\n");
- gvt_err("vgpu%d: likely triggers a gfx reset\n",
- vgpu->id);
- gvt_err("------------------------------------------\n");
+ gvt_vgpu_err("------------------------------------------\n");
+ gvt_vgpu_err("likely triggers a gfx reset\n");
+ gvt_vgpu_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
@@ -163,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock);
return 0;
err:
- gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
- vgpu->id, offset, bytes);
+ gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
+ offset, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
@@ -188,6 +244,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
u32 old_vreg = 0, old_sreg = 0;
int ret = -EINVAL;
+ if (vgpu->failsafe) {
+ failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
+ return 0;
+ }
+
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -197,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (gp) {
ret = gp->handler(gp, pa, p_data, bytes);
if (ret) {
- gvt_err("vgpu%d: guest page write error %d, "
- "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- vgpu->id, ret,
- gp->gfn, pa, *(u32 *)p_data, bytes);
+			gvt_vgpu_err("guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, "
+ "var 0x%x, len %d\n",
+ ret, gp->gfn, pa,
+ *(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);
return ret;
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack)
- gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -267,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
/* all register bits are RO. */
if (ro_mask == ~(u64)0) {
- gvt_err("vgpu%d: try to write RO reg %x\n",
- vgpu->id, offset);
+ gvt_vgpu_err("try to write RO reg %x\n",
+ offset);
ret = 0;
goto out;
}
@@ -298,8 +360,8 @@ out:
mutex_unlock(&gvt->lock);
return 0;
err:
- gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
- vgpu->id, offset, bytes);
+ gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
+ bytes);
mutex_unlock(&gvt->lock);
return ret;
}
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+ vgpu->mmio.disable_warn_untrack = false;
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 3bc620f56f35..a3a027025cd0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+ unsigned int offset);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d9fb41ab7119..311799136d7f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -27,7 +27,6 @@
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
- void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
u8 *buf;
int i;
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
- memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
- INTEL_GVT_OPREGION_SIZE);
+ memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
+ INTEL_GVT_OPREGION_SIZE);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
@@ -68,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to get MFN from VA\n");
+ gvt_vgpu_err("fail to get MFN from VA\n");
return -EINVAL;
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map);
if (ret) {
- gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+ gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
+ ret);
return ret;
}
}
@@ -288,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
if (!(swsci & SWSCI_SCI_SELECT)) {
- gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+ gvt_vgpu_err("requesting SMI service\n");
return 0;
}
/* ignore non 0->1 transitions */
@@ -301,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
func = GVT_OPREGION_FUNC(*scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic);
if (!querying_capabilities(*scic)) {
- gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+ gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n",
- vgpu->id,
opregion_func_name(func),
opregion_subfunc_name(subfunc));
/*
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2b3a642284b6..95ee091ce085 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x24e0), 0, false},
+ {RCS, _MMIO(0x24e4), 0, false},
+ {RCS, _MMIO(0x24e8), 0, false},
+ {RCS, _MMIO(0x24ec), 0, false},
+ {RCS, _MMIO(0x24f0), 0, false},
+ {RCS, _MMIO(0x24f4), 0, false},
+ {RCS, _MMIO(0x24f8), 0, false},
+ {RCS, _MMIO(0x24fc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x24e0), 0, false},
+ {RCS, _MMIO(0x24e4), 0, false},
+ {RCS, _MMIO(0x24e8), 0, false},
+ {RCS, _MMIO(0x24ec), 0, false},
+ {RCS, _MMIO(0x24f0), 0, false},
+ {RCS, _MMIO(0x24f4), 0, false},
+ {RCS, _MMIO(0x24f8), 0, false},
+ {RCS, _MMIO(0x24fc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
@@ -151,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
I915_WRITE_FW(reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
- gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg(vgpu, regs[ring_id]) = 0;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 06c9584ac5f0..34b9acdf3479 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,7 +101,7 @@ struct tbs_sched_data {
struct list_head runq_head;
};
-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
static void tbs_sched_func(struct work_struct *work)
{
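
The switch to msecs_to_jiffies() matters on low-HZ kernels: the old expression used integer division that truncates to zero whenever HZ < 1000, so the delayed work re-armed with no delay at all. A worked comparison with an assumed HZ of 250:

	#define MY_HZ 250	/* assumed CONFIG_HZ, for illustration */

	unsigned long old_slice = 1 * MY_HZ / 1000;		  /* == 0 jiffies */
	unsigned long new_slice = DIV_ROUND_UP(1 * MY_HZ, 1000); /* == 1 jiffy */

msecs_to_jiffies() rounds up like DIV_ROUND_UP here, so the time slice can never collapse to a zero-length delay.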
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head);
- schedule_delayed_work(&sched_data->work, sched_data->period);
+ schedule_delayed_work(&sched_data->work, 0);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b6d0efdd1a..c4353ed86d4b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("Invalid guest context descriptor\n");
+ gvt_vgpu_err("Invalid guest context descriptor\n");
return -EINVAL;
}
@@ -130,15 +130,16 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu, shadow_ctx_notifier_block);
- struct drm_i915_gem_request *req =
- (struct drm_i915_gem_request *)data;
- struct intel_gvt_workload_scheduler *scheduler =
- &vgpu->gvt->scheduler;
+ struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+ struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+ shadow_ctx_notifier_block[req->engine->id]);
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
+ if (unlikely(!workload))
+ return NOTIFY_OK;
+
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +149,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
+		/* A status of -EINPROGRESS means this workload hit no
+		 * issue during dispatch, so on SCHEDULE_OUT set the
+		 * status to zero for good. Any other status means
+		 * something went wrong during dispatch, and the status
+		 * must not be cleared.
+		 */
+ if (workload->status == -EINPROGRESS)
+ workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -163,7 +173,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -175,9 +187,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&dev_priv->drm.struct_mutex);
+	/* GVT pins the shadow context itself, even though it will also be
+	 * pinned when i915 allocates the request. This is because GVT
+	 * updates the guest context from the shadow context once the
+	 * workload completes, and by then i915 may already have unpinned
+	 * the shadow context, making the shadow_ctx pages invalid. After
+	 * updating the guest context, GVT can safely unpin shadow_ctx.
+	 */
+ ret = engine->context_pin(engine, shadow_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ workload->status = ret;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+ }
+
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
- gvt_err("fail to allocate gem request\n");
+ gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto out;
}
@@ -190,9 +217,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
- ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
- if (ret)
- goto out;
+ if ((workload->ring_id == RCS) &&
+ (workload->wa_ctx.indirect_ctx.size != 0)) {
+ ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret)
+ goto out;
+ }
ret = populate_shadow_context(workload);
if (ret)
@@ -215,6 +245,9 @@ out:
if (!IS_ERR_OR_NULL(rq))
i915_add_request_no_flush(rq);
+ else
+ engine->context_unpin(engine, shadow_ctx);
+
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -310,7 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("invalid guest context descriptor\n");
+ gvt_vgpu_err("invalid guest context descriptor\n");
return;
}
@@ -359,15 +392,31 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;
- if (!workload->status && !vgpu->resetting) {
+	/* For a workload with a request, wait for the context switch to
+	 * make sure the request has completed.
+	 * For a workload without a request, complete it directly.
+	 */
+ if (workload->req) {
+ struct drm_i915_private *dev_priv =
+ workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine =
+ dev_priv->engine[workload->ring_id];
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
- update_guest_context(workload);
+ i915_gem_request_put(fetch_and_zero(&workload->req));
+
+ if (!workload->status && !vgpu->resetting) {
+ update_guest_context(workload);
- for_each_set_bit(event, workload->pending_events,
- INTEL_GVT_EVENT_MAX)
- intel_vgpu_trigger_virtual_event(vgpu, event);
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ /* unpin shadow ctx as the shadow_ctx update is done */
+ engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -397,7 +446,7 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- long lret;
+ struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -440,29 +489,19 @@ static int workload_thread(void *priv)
mutex_unlock(&gvt->lock);
if (ret) {
- gvt_err("fail to dispatch workload, skip\n");
+ vgpu = workload->vgpu;
+ gvt_vgpu_err("fail to dispatch workload, skip\n");
goto complete;
}
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
-
- lret = i915_wait_request(workload->req,
- 0, MAX_SCHEDULE_TIMEOUT);
- if (lret < 0) {
- workload->status = lret;
- gvt_err("fail to wait workload, skip\n");
- } else {
- workload->status = 0;
- }
+ i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);
- if (workload->req)
- i915_gem_request_put(fetch_and_zero(&workload->req));
-
complete_current_workload(gvt, ring_id);
if (need_force_wake)
@@ -493,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- int i;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id i;
gvt_dbg_core("clean workload scheduler\n");
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (scheduler->thread[i]) {
- kthread_stop(scheduler->thread[i]);
- scheduler->thread[i] = NULL;
- }
+ for_each_engine(engine, gvt->dev_priv, i) {
+ atomic_notifier_chain_unregister(
+ &engine->context_status_notifier,
+ &gvt->shadow_ctx_notifier_block[i]);
+ kthread_stop(scheduler->thread[i]);
}
}
@@ -509,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id i;
int ret;
- int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- /* check ring mask at init time */
- if (!HAS_ENGINE(gvt->dev_priv, i))
- continue;
-
+ for_each_engine(engine, gvt->dev_priv, i) {
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -539,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
+
+ gvt->shadow_ctx_notifier_block[i].notifier_call =
+ shadow_context_status_change;
+ atomic_notifier_chain_register(&engine->context_status_notifier,
+ &gvt->shadow_ctx_notifier_block[i]);
}
return 0;
err:
@@ -550,9 +592,6 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
- &vgpu->shadow_ctx_notifier_block);
-
i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
@@ -567,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
- vgpu->shadow_ctx_notifier_block.notifier_call =
- shadow_context_status_change;
-
- atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
- &vgpu->shadow_ctx_notifier_block);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 95a97aa0051e..41cfa5ccae84 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+static struct {
+ unsigned int low_mm;
+ unsigned int high_mm;
+ unsigned int fence;
+ enum intel_vgpu_edid edid;
+ char *name;
+} vgpu_types[] = {
+/* Fixed vGPU type table */
+ { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+ { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
+ { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
+ { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+};
+
/**
* intel_gvt_init_vgpu_types - initialize vGPU type list
* @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
unsigned int min_low;
/* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type and 'y' means maximum vGPU
- * instances user can create on one physical GPU for this
- * type.
+	 * physical GPU generation type (e.g. V4 for a BDW server, V5
+	 * for a SKL server).
*
* Depend on physical SKU resource, might see vGPU types like
* GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
*/
low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = 4;
+	num_types = ARRAY_SIZE(vgpu_types);
gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
min_low = MB_TO_BYTES(32);
for (i = 0; i < num_types; ++i) {
- if (low_avail / min_low == 0)
+ if (low_avail / vgpu_types[i].low_mm == 0)
break;
- gvt->types[i].low_gm_size = min_low;
- gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
- gvt->types[i].fence = 4;
- gvt->types[i].max_instance = min(low_avail / min_low,
- high_avail / gvt->types[i].high_gm_size);
- gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
+ gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
+ gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->types[i].resolution = vgpu_types[i].edid;
+ gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
+ high_avail / vgpu_types[i].high_mm);
if (IS_GEN8(gvt->dev_priv))
- sprintf(gvt->types[i].name, "GVTg_V4_%u",
- gvt->types[i].max_instance);
+ sprintf(gvt->types[i].name, "GVTg_V4_%s",
+ vgpu_types[i].name);
else if (IS_GEN9(gvt->dev_priv))
- sprintf(gvt->types[i].name, "GVTg_V5_%u",
- gvt->types[i].max_instance);
+ sprintf(gvt->types[i].name, "GVTg_V5_%s",
+ vgpu_types[i].name);
- min_low <<= 1;
- gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+ i, gvt->types[i].name,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
+ gvt->types[i].high_gm_size, gvt->types[i].fence,
+ vgpu_edid_str(gvt->types[i].resolution));
}
gvt->num_types = i;
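
A worked example, with all numbers assumed, makes the sizing rule concrete: on a SKU that leaves 512MB of low GM and 4096MB of high GM for vGPUs, the 128MB/512MB type gives

	avail_instance = min(512 / 128, 4096 / 512) = min(4, 8) = 4

which matches the "4" suffix baked into that entry's name field in the fixed table, so the GVTg_V5_4 name still reads as "up to 4 instances" on a full-sized SKU.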
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
int i;
unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+ unsigned int low_gm_min, high_gm_min, fence_min;
/* Need to depend on maximum hw resource size but keep on
* static config for now.
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
fence_min = fence_avail / gvt->types[i].fence;
- total_min = min(min(low_gm_min, high_gm_min), fence_min);
- gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
- total_min);
+ gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
+ fence_min);
- gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name,
gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence);
}
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- ret = intel_vgpu_init_display(vgpu);
+ ret = intel_vgpu_init_display(vgpu, param->resolution);
if (ret)
goto out_clean_gtt;
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
+ param.resolution = type->resolution;
/* XXX current param based on MB */
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
populate_pvinfo_page(vgpu);
intel_vgpu_reset_display(vgpu);
- if (dmlr)
+ if (dmlr) {
intel_vgpu_reset_cfg_space(vgpu);
+		/* only reset failsafe mode on a device-model-level reset */
+ vgpu->failsafe = false;
+ vgpu->pv_notified = false;
+ }
}
vgpu->resetting = false;