Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 4
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 81
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 8
-rw-r--r--  drivers/gpu/drm/drm_fbdev_generic.c | 15
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 6
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 76
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 35
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_switcheroo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 7
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 613
-rw-r--r--  drivers/gpu/drm/panfrost/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 27
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 16
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h | 5
-rw-r--r--  drivers/gpu/drm/tests/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tests/drm_mm_test.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 19
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c | 41
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.h | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 176
-rw-r--r-- (was -rwxr-xr-x)  drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h | 0
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 33
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c | 3
53 files changed, 457 insertions(+), 1082 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8b7a09b392ac..0f4cb41078c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -63,6 +63,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
amdgpu_ctx_put(p->ctx);
return -ECANCELED;
}
+
+ amdgpu_sync_create(&p->sync);
return 0;
}
@@ -454,18 +456,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
}
r = amdgpu_sync_fence(&p->sync, fence);
- if (r)
- goto error;
-
- /*
- * When we have an explicit dependency it might be necessary to insert a
- * pipeline sync to make sure that all caches etc are flushed and the
- * next job actually sees the results from the previous one.
- */
- if (fence->context == p->gang_leader->base.entity->fence_context)
- r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
-
-error:
dma_fence_put(fence);
return r;
}
@@ -1190,10 +1180,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct drm_gpu_scheduler *sched;
struct amdgpu_bo_list_entry *e;
+ struct dma_fence *fence;
unsigned int i;
int r;
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ return r;
+ }
+
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
@@ -1213,10 +1212,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
- if (r && r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- return r;
+ sched = p->gang_leader->base.entity->rq->sched;
+ while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+
+ /*
+ * When we have a dependency it might be necessary to insert a
+ * pipeline sync to make sure that all caches etc are flushed and the
+ * next job actually sees the results from the previous one
+ * before we start executing on the same scheduler ring.
+ */
+ if (!s_fence || s_fence->sched != sched)
+ continue;
+
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ if (r)
+ return r;
+ }
+ return 0;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1256,9 +1269,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
continue;
fence = &p->jobs[i]->base.s_fence->scheduled;
+ dma_fence_get(fence);
r = drm_sched_job_add_dependency(&leader->base, fence);
- if (r)
+ if (r) {
+ dma_fence_put(fence);
goto error_cleanup;
+ }
}
if (p->gang_size > 1) {
@@ -1346,6 +1362,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
unsigned i;
+ amdgpu_sync_free(&parser->sync);
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
kfree(parser->post_deps[i].chain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index bac7976975bd..dcd8c066bc1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
dma_fence_get(f);
r = drm_sched_job_add_dependency(&job->base, f);
- if (r)
+ if (r) {
+ dma_fence_put(f);
return r;
+ }
}
return 0;
}
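
Both amdgpu hunks above apply the same reference-counting contract around drm_sched_job_add_dependency(): take a fence reference before handing it to the dependency list, and drop it yourself if the call fails, because on error the list never assumed ownership. The following is a minimal userspace sketch of that pattern; toy_fence and toy_add_dependency are illustrative names, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	int refcount;
};

static void fence_get(struct toy_fence *f) { f->refcount++; }

static void fence_put(struct toy_fence *f)
{
	if (--f->refcount == 0) {
		printf("fence freed\n");
		free(f);
	}
}

/* Consumer that takes ownership of one reference on success only. */
static int toy_add_dependency(struct toy_fence *f, int simulate_error)
{
	if (simulate_error)
		return -12; /* -ENOMEM: caller still owns its reference */
	/* success: the dependency list now owns the reference */
	return 0;
}

int main(void)
{
	struct toy_fence *f = calloc(1, sizeof(*f));
	int r;

	f->refcount = 1;	/* creator's reference */

	fence_get(f);		/* reference for the dependency list */
	r = toy_add_dependency(f, 1);
	if (r)
		fence_put(f);	/* error: nobody took it, drop it here */

	fence_put(f);		/* drop the creator's reference */
	return 0;
}

Without the error-path put, a failed add would leave the refcount at two and the fence would never be freed, which is exactly the leak the two hunks fix.
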
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 11bb59399471..3d1f50f481cf 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
kmem_cache_free(slab_blocks, block);
}
+static void list_insert_sorted(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *node;
+ struct list_head *head;
+
+ head = &mm->free_list[drm_buddy_block_order(block)];
+ if (list_empty(head)) {
+ list_add(&block->link, head);
+ return;
+ }
+
+ list_for_each_entry(node, head, link)
+ if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+ break;
+
+ __list_add(&block->link, node->link.prev, &node->link);
+}
+
static void mark_allocated(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
- list_add(&block->link,
- &mm->free_list[drm_buddy_block_order(block)]);
+ list_insert_sorted(mm, block);
}
static void mark_split(struct drm_buddy_block *block)
@@ -387,20 +405,26 @@ err_undo:
}
static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
{
struct drm_buddy_block *max_block = NULL, *node;
+ unsigned int i;
- max_block = list_first_entry_or_null(head,
- struct drm_buddy_block,
- link);
- if (!max_block)
- return NULL;
+ for (i = order; i <= mm->max_order; ++i) {
+ if (!list_empty(&mm->free_list[i])) {
+ node = list_last_entry(&mm->free_list[i],
+ struct drm_buddy_block,
+ link);
+ if (!max_block) {
+ max_block = node;
+ continue;
+ }
- list_for_each_entry(node, head, link) {
- if (drm_buddy_block_offset(node) >
- drm_buddy_block_offset(max_block))
- max_block = node;
+ if (drm_buddy_block_offset(node) >
+ drm_buddy_block_offset(max_block)) {
+ max_block = node;
+ }
+ }
}
return max_block;
@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
- unsigned int i;
+ unsigned int tmp;
int err;
- for (i = order; i <= mm->max_order; ++i) {
- if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(&mm->free_list[i]);
- if (block)
- break;
- } else {
- block = list_first_entry_or_null(&mm->free_list[i],
- struct drm_buddy_block,
- link);
- if (block)
- break;
+ if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+ block = get_maxblock(mm, order);
+ if (block)
+ /* Store the obtained block order */
+ tmp = drm_buddy_block_order(block);
+ } else {
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+ if (!list_empty(&mm->free_list[tmp])) {
+ block = list_last_entry(&mm->free_list[tmp],
+ struct drm_buddy_block,
+ link);
+ if (block)
+ break;
+ }
}
}
@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
BUG_ON(!drm_buddy_block_is_free(block));
- while (i != order) {
+ while (tmp != order) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
block = block->right;
- i--;
+ tmp--;
}
return block;
err_undo:
- if (i != order)
+ if (tmp != order)
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
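
The drm_buddy change above introduces an ordering invariant: each free list is kept sorted by block offset, so the lowest block sits at the head and the highest at the tail, letting DRM_BUDDY_TOPDOWN_ALLOCATION take list_last_entry() instead of scanning the whole list. Below is a minimal userspace sketch of sorted insertion into a circular doubly linked list; the struct and helpers are illustrative, not the kernel's list API.

#include <stdio.h>
#include <stdlib.h>

struct block {
	unsigned long offset;
	struct block *prev, *next;	/* circular, with a sentinel head */
};

static void list_init(struct block *head)
{
	head->prev = head->next = head;
}

/* Insert so that the list stays ordered by ascending offset. */
static void insert_sorted(struct block *head, struct block *nb)
{
	struct block *node = head->next;

	while (node != head && node->offset < nb->offset)
		node = node->next;

	/* splice nb in front of the first node with a larger offset */
	nb->prev = node->prev;
	nb->next = node;
	node->prev->next = nb;
	node->prev = nb;
}

int main(void)
{
	struct block head, blocks[4] = {
		{ .offset = 0x3000 }, { .offset = 0x1000 },
		{ .offset = 0x4000 }, { .offset = 0x2000 },
	};
	int i;

	list_init(&head);
	for (i = 0; i < 4; i++)
		insert_sorted(&head, &blocks[i]);

	/* bottom-up pick: head->next; top-down pick: head->prev */
	printf("lowest 0x%lx, highest 0x%lx\n",
	       head.next->offset, head.prev->offset);
	return 0;
}

With the invariant in place, get_maxblock() only needs the tail entry of each order's list, trading O(n) insertion for O(1) top-down lookup.
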
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 97b34f377fe8..28c428e9c530 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,7 +30,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/pci.h>
#include <linux/sysrq.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
@@ -1952,6 +1954,7 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
{
struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_surface_size sizes;
int ret;
@@ -1973,6 +1976,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
return ret;
strcpy(fb_helper->fb->comm, "[fbcon]");
+
+ /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index 43f94aa9e015..365f80717fa1 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -155,11 +155,6 @@ static const struct fb_ops drm_fbdev_fb_ops = {
.fb_imageblit = drm_fbdev_fb_imageblit,
};
-static struct fb_deferred_io drm_fbdev_defio = {
- .delay = HZ / 20,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-
/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*/
@@ -206,8 +201,14 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
return -ENOMEM;
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
- info->fbdefio = &drm_fbdev_defio;
- fb_deferred_io_init(info);
+ /* Set a default deferred I/O handler */
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ return ret;
} else {
/* buffer is mapped for HW framebuffer */
ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ca531dbb749d..b409fe256fd0 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -316,6 +316,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Ideapad D330-10IGL (HD) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */
.matches = {
/* Non exact match to match all versions */
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 7de37f8c68fd..83229a031af0 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
}
EXPORT_SYMBOL(drm_vma_offset_remove);
-/**
- * drm_vma_node_allow - Add open-file to list of allowed users
- * @node: Node to modify
- * @tag: Tag of file to remove
- *
- * Add @tag to the list of allowed open-files for this node. If @tag is
- * already on this list, the ref-count is incremented.
- *
- * The list of allowed-users is preserved across drm_vma_offset_add() and
- * drm_vma_offset_remove() calls. You may even call it if the node is currently
- * not added to any offset-manager.
- *
- * You must remove all open-files the same number of times as you added them
- * before destroying the node. Otherwise, you will leak memory.
- *
- * This is locked against concurrent access internally.
- *
- * RETURNS:
- * 0 on success, negative error code on internal failure (out-of-mem)
- */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+static int vma_node_allow(struct drm_vma_offset_node *node,
+ struct drm_file *tag, bool ref_counted)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag) {
- entry->vm_count++;
+ if (ref_counted)
+ entry->vm_count++;
goto unlock;
} else if (tag > entry->vm_tag) {
iter = &(*iter)->rb_right;
@@ -307,9 +289,59 @@ unlock:
kfree(new);
return ret;
}
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node. If @tag is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+ return vma_node_allow(node, tag, true);
+}
EXPORT_SYMBOL(drm_vma_node_allow);
/**
+ * drm_vma_node_allow_once - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * This is not ref-counted unlike drm_vma_node_allow() hence drm_vma_node_revoke()
+ * should only be called once after this.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+ return vma_node_allow(node, tag, false);
+}
+EXPORT_SYMBOL(drm_vma_node_allow_once);
+
+/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
* @tag: Tag of file to remove
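
The motivation for drm_vma_node_allow_once() is visible in the i915 hunk that follows: the mmap-offset path could call the ref-counted drm_vma_node_allow() more than once for the same file while pairing it with only a single drm_vma_node_revoke(), leaking a reference. A minimal sketch of the two semantics, with toy names rather than the kernel API:

#include <stdio.h>
#include <stdbool.h>

struct entry {
	int vm_count;
};

static void allow(struct entry *e, bool ref_counted)
{
	if (e->vm_count == 0)
		e->vm_count = 1;	/* first grant always inserts */
	else if (ref_counted)
		e->vm_count++;		/* drm_vma_node_allow() semantics */
	/* else: drm_vma_node_allow_once() leaves the count alone */
}

static void revoke(struct entry *e)
{
	if (e->vm_count > 0)
		e->vm_count--;
}

int main(void)
{
	struct entry a = { 0 }, b = { 0 };

	allow(&a, true);  allow(&a, true);  revoke(&a);
	allow(&b, false); allow(&b, false); revoke(&b);

	/* a leaks one reference; b is cleanly back to zero */
	printf("counted: %d, once: %d\n", a.vm_count, b.vm_count);
	return 0;
}
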
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 4f69bff63068..2aac6bf78740 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -697,7 +697,7 @@ insert:
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
- drm_vma_node_allow(&mmo->vma_node, file);
+ drm_vma_node_allow_once(&mmo->vma_node, file);
return mmo;
err:
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7771a19008c6..bbeeb6dde7ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -288,39 +288,6 @@ static const u8 dg2_xcs_offsets[] = {
END
};
-static const u8 mtl_xcs_offsets[] = {
- NOP(1),
- LRI(13, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
- NOP(4),
-
- NOP(1),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- END
-};
-
static const u8 gen8_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
@@ -739,9 +706,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
else
return gen8_rcs_offsets;
} else {
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
- return mtl_xcs_offsets;
- else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 9f1c209d9251..0616b73175f3 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
"0x%llx\n");
+static int vgpu_status_get(void *data, u64 *val)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+ *val = 0;
+
+ if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
* @vgpu: a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
&vgpu_mmio_diff_fops);
debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
&vgpu_scan_nonprivbb_fops);
+ debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
+ &vgpu_status_fops);
}
/**
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
- debugfs_remove_recursive(vgpu->debugfs);
- vgpu->debugfs = NULL;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root && gvt->debugfs_root) {
+ debugfs_remove_recursive(vgpu->debugfs);
+ vgpu->debugfs = NULL;
+ }
}
/**
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- debugfs_remove_recursive(gvt->debugfs_root);
- gvt->debugfs_root = NULL;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root) {
+ debugfs_remove_recursive(gvt->debugfs_root);
+ gvt->debugfs_root = NULL;
+ }
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 7af09eb24ac0..6834f9fe40cf 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -136,7 +136,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
struct list_head *pos;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
- if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+ !list_empty(&vgpu->dmabuf_obj_list_head)) {
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 51e5e8fb505b..4ec85308379a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
int idx;
bool ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return false;
idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
PAGE_SIZE, &dma_addr);
- if (ret) {
- ppgtt_invalidate_spt(spt);
- return ret;
- }
+ if (ret)
+ goto err;
sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index);
return 0;
+err:
+ /* Cancel the existing address mappings of DMA addr. */
+ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(sub_spt, &sub_se);
+ }
+ /* Release the new allocated spt. */
+ trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+ sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+ ppgtt_free_spt(sub_spt);
+ return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62823c0e13ab..2d65800d8e93 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+enum {
+ INTEL_VGPU_STATUS_ATTACHED = 0,
+ INTEL_VGPU_STATUS_ACTIVE,
+ INTEL_VGPU_STATUS_NR_BITS,
+};
+
struct intel_vgpu {
struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
- bool active;
- bool attached;
+ DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
- for_each_if(vgpu->active)
+ for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
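
The gvt.h hunk above replaces two plain bools (active, attached) with bits in a single DECLARE_BITMAP() status word, so the flags can be flipped with the atomic set_bit()/clear_bit()/test_bit() helpers instead of racy bool stores. A userspace sketch of the same semantics, with plain (non-atomic) operations standing in for the kernel bitops:

#include <stdio.h>

enum {
	VGPU_STATUS_ATTACHED = 0,
	VGPU_STATUS_ACTIVE,
	VGPU_STATUS_NR_BITS,
};

static void set_bit_(int nr, unsigned long *word)   { *word |=  (1UL << nr); }
static void clear_bit_(int nr, unsigned long *word) { *word &= ~(1UL << nr); }
static int  test_bit_(int nr, const unsigned long *word)
{
	return (*word >> nr) & 1;
}

int main(void)
{
	unsigned long status = 0;

	set_bit_(VGPU_STATUS_ATTACHED, &status);
	set_bit_(VGPU_STATUS_ACTIVE, &status);
	clear_bit_(VGPU_STATUS_ACTIVE, &status);

	printf("attached=%d active=%d\n",
	       test_bit_(VGPU_STATUS_ATTACHED, &status),
	       test_bit_(VGPU_STATUS_ACTIVE, &status));
	return 0;
}

This is also why intel_gvt_activate_vgpu() in the vgpu.c hunk can drop its mutex: the atomic bitop no longer needs the lock for a single-flag update.
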
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index a6b2021b665f..68eca023bbc6 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* enabled by guest. so if msi_trigger is null, success is still
* returned and don't inject interrupt into guest.
*/
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f5451adcd489..8ae7039b3683 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!itr->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
continue;
if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (vgpu->attached)
- return -EEXIST;
-
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
- vgpu->attached = true;
-
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_get_kvm(vgpu->vfio_device.kvm);
kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
+ set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
@@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (!vgpu->attached)
- return;
-
intel_gvt_release_vgpu(vgpu);
+ clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
vgpu->dma_addr_cache = RB_ROOT;
intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
- vgpu->attached = false;
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
- if (WARN_ON_ONCE(vgpu->attached))
- return;
-
vfio_unregister_group_dev(&vgpu->vfio_device);
vfio_put_device(&vgpu->vfio_device);
}
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
- return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+ return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
struct gvt_dma *entry;
int ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
struct gvt_dma *entry;
int ret = 0;
- if (!vgpu->attached)
- return -ENODEV;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
{
struct gvt_dma *entry;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return;
mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
- if (vgpu->active)
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
intel_vgpu_emulate_vblank(vgpu);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9cd8fcbf7cad..f4055804aad1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
+ intel_vgpu_unpin_mm(workload->shadow_mm);
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
goto out;
}
- if (!scheduler->current_vgpu->active ||
+ if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+ scheduler->current_vgpu->status) ||
list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3c529c2705dd..a5497440484f 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = true;
- mutex_unlock(&vgpu->vgpu_lock);
+ set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
}
/**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = false;
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
- drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+ "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
if (ret)
goto out_free_vgpu;
- vgpu->active = false;
-
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
return vgpu;
out_free_vgpu:
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 41384626577d..cf1c0970ecb4 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -936,12 +936,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
- struct drm_i915_private *i915 = to_i915(dev);
-
intel_fbdev_restore_mode(dev);
- if (HAS_DISPLAY(i915))
- vga_switcheroo_process_delayed_switch();
+ vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 23777d500cdf..f45bd6b6cede 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -19,6 +19,10 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
+ if (!HAS_DISPLAY(i915)) {
+ dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
+ return;
+ }
if (state == VGA_SWITCHEROO_ON) {
drm_info(&i915->drm, "switched on\n");
@@ -44,7 +48,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && atomic_read(&i915->drm.open_count) == 0;
+ return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 310fb83c527e..2990dd4d4a0d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
struct intel_selftest_saved_policy *saved,
- u32 modify_type)
-
+ enum selftest_scheduler_modify modify_type)
{
int err;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index d4b907889a21..cd399b0b7181 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv)
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
- VIU_OSD_HOLD_FIFO_LINES(31) |
VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
- reg |= VIU_OSD_BURST_LENGTH_32;
+ reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
else
- reg |= VIU_OSD_BURST_LENGTH_64;
+ reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 6484b97c5344..f3c9600221d4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
+ bool gx_off)
{
struct msm_gpu *gpu = &adreno_gpu->base;
@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
return;
}
- /* Halt the gx side of GBIF */
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
- spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+ if (gx_off) {
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+ }
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
- a6xx_bus_clear_pending_transactions(adreno_gpu);
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
/* Reset GPU core blocks */
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
return;
}
- a6xx_bus_clear_pending_transactions(adreno_gpu);
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 36c8fb699b56..3be0f2928b57 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1270,6 +1270,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
if (hang_debug)
a6xx_dump(gpu);
+ /*
+ * Set the hung flag so that recovery-specific sequences run during
+ * the rpm suspend we are about to trigger.
+ */
+ a6xx_gpu->hung = true;
+
/* Halt SQE first */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
@@ -1312,6 +1318,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
+ a6xx_gpu->hung = false;
}
static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
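
The a6xx changes above form a small handshake: recovery sets a6xx_gpu->hung before forcing a runtime suspend, and the GMU shutdown path passes that flag to a6xx_bus_clear_pending_transactions() to decide whether the GX-side GBIF halt is needed. A toy sketch of the control flow, not the driver API:

#include <stdio.h>
#include <stdbool.h>

struct gpu { bool hung; };

static void clear_pending_transactions(bool gx_off)
{
	if (gx_off)
		printf("halting GX side of GBIF\n");	/* only when hung */
	printf("halting new GBIF client requests\n");
}

static void shutdown(struct gpu *gpu)
{
	/* normal suspend: GX stays up; recovery: full halt */
	clear_pending_transactions(gpu->hung);
}

static void recover(struct gpu *gpu)
{
	gpu->hung = true;	/* tell the suspend path we are recovering */
	shutdown(gpu);		/* rpm suspend triggered by recovery */
	gpu->hung = false;
}

int main(void)
{
	struct gpu gpu = { .hung = false };

	shutdown(&gpu);		/* regular suspend, no GX halt */
	recover(&gpu);		/* recovery path, GX halt included */
	return 0;
}
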
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index ab853f61db63..eea2e60ce3b7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -32,6 +32,7 @@ struct a6xx_gpu {
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
+ bool hung;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 628806423f7d..36f062c7582f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -551,13 +551,14 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return 0;
}
+static int adreno_system_suspend(struct device *dev);
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
- pm_runtime_force_suspend(dev);
+ WARN_ON_ONCE(adreno_system_suspend(dev));
gpu->funcs->destroy(gpu);
priv->gpu_pdev = NULL;
@@ -609,7 +610,7 @@ static int adreno_remove(struct platform_device *pdev)
static void adreno_shutdown(struct platform_device *pdev)
{
- pm_runtime_force_suspend(&pdev->dev);
+ WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}
static const struct of_device_id dt_match[] = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 57586c794b84..3605f095b2de 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -352,6 +352,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
/* Ensure string is null terminated: */
str[len] = '\0';
+ mutex_lock(&gpu->lock);
+
if (param == MSM_PARAM_COMM) {
paramp = &ctx->comm;
} else {
@@ -361,6 +363,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
kfree(*paramp);
*paramp = str;
+ mutex_unlock(&gpu->lock);
+
return 0;
}
case MSM_PARAM_SYSPROF:
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 5d4b1c95033f..b4f9b1343d63 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -29,11 +29,9 @@ enum {
ADRENO_FW_MAX,
};
-enum adreno_quirks {
- ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
- ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
- ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
-};
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
struct adreno_rev {
uint8_t core;
@@ -65,7 +63,7 @@ struct adreno_info {
const char *name;
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
- enum adreno_quirks quirks;
+ u64 quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
u32 inactive_period;
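
The adreno_gpu.h hunk fixes a classic flag-encoding bug: the old enum gave the quirks the values 1, 2, and 3, so a GPU with both TWO_PASS_USE_WFI (1) and FAULT_DETECT_MASK (2) also tested true for LMLOADKILL_DISABLE (3), since 1 | 2 == 3. Distinct BIT() values cannot alias that way. A short demonstration:

#include <stdio.h>

#define BIT(n) (1UL << (n))

#define QUIRK_TWO_PASS_USE_WFI   BIT(0)
#define QUIRK_FAULT_DETECT_MASK  BIT(1)
#define QUIRK_LMLOADKILL_DISABLE BIT(2)

int main(void)
{
	unsigned long old_style = 1 | 2;	/* enum values combined */
	unsigned long new_style = QUIRK_TWO_PASS_USE_WFI |
				  QUIRK_FAULT_DETECT_MASK;

	printf("old: lmloadkill falsely set? %s\n",
	       (old_style & 3) == 3 ? "yes" : "no");
	printf("new: lmloadkill set? %s\n",
	       (new_style & QUIRK_LMLOADKILL_DISABLE) ? "yes" : "no");
	return 0;
}
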
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 7cbcef6efe17..62f6ff6abf41 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -132,7 +132,6 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
* dpu_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
* @fb: Pointer to output framebuffer
- * @wb_roi: Pointer to output region of interest
*/
static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct drm_framebuffer *fb)
@@ -692,7 +691,7 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
- * @init: Pointer to init info structure with initialization params
+ * @p: Pointer to init info structure with initialization params
*/
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
struct dpu_enc_phys_init_params *p)
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index d030a93a08c3..cc3efed593aa 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
isr = dp_catalog_aux_get_irq(aux->catalog);
+ /* no interrupts pending, return immediately */
+ if (!isr)
+ return;
+
if (!aux->cmd_busy)
return;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c373a40bef37..f1f01db699d3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -530,11 +530,19 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
- return ret;
+ goto err_put_phy;
platform_set_drvdata(pdev, hdmi);
- return component_add(&pdev->dev, &msm_hdmi_ops);
+ ret = component_add(&pdev->dev, &msm_hdmi_ops);
+ if (ret)
+ goto err_put_phy;
+
+ return 0;
+
+err_put_phy:
+ msm_hdmi_put_phy(hdmi);
+ return ret;
}
static int msm_hdmi_dev_remove(struct platform_device *pdev)
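
The hdmi.c probe fix above adopts the kernel's goto-unwind convention: every failure after a resource is acquired jumps to a label that releases it, so no early return leaks the PHY reference. A standalone sketch of the pattern, where acquire()/release() stand in for msm_hdmi_get_phy()/msm_hdmi_put_phy():

#include <stdio.h>

static int acquired;

static int acquire(void)  { acquired = 1; return 0; }
static void release(void) { acquired = 0; }

static int step(int fail) { return fail ? -19 : 0; }	/* -ENODEV */

static int probe(int fail_at)
{
	int ret;

	ret = acquire();
	if (ret)
		return ret;

	ret = step(fail_at == 1);	/* e.g. devm_pm_runtime_enable() */
	if (ret)
		goto err_release;

	ret = step(fail_at == 2);	/* e.g. component_add() */
	if (ret)
		goto err_release;

	return 0;

err_release:
	release();	/* undo the acquisition on any error path */
	return ret;
}

int main(void)
{
	probe(2);
	printf("leaked? %s\n", acquired ? "yes" : "no");
	return 0;
}
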
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8b0b0ac74a6f..45e81eb148a8 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1278,7 +1278,7 @@ void msm_drv_shutdown(struct platform_device *pdev)
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
- if (drm && drm->registered)
+ if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 30ed45af76ad..380249500325 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -335,6 +335,8 @@ static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **
struct msm_file_private *ctx = submit->queue->ctx;
struct task_struct *task;
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
/* Note that kstrdup will return NULL if argument is NULL: */
*comm = kstrdup(ctx->comm, GFP_KERNEL);
*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 651786bc55e5..732295e25683 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -376,10 +376,18 @@ struct msm_file_private {
*/
int sysprof;
- /** comm: Overridden task comm, see MSM_PARAM_COMM */
+ /**
+ * comm: Overridden task comm, see MSM_PARAM_COMM
+ *
+ * Accessed under msm_gpu::lock
+ */
char *comm;
- /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
+ /**
+ * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ *
+ * Accessed under msm_gpu::lock
+ */
char *cmdline;
/**
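
The msm changes above (adreno_gpu.c, msm_gpu.c, msm_gpu.h) establish one locking rule: ctx->comm and ctx->cmdline may be freed and replaced by one thread (set_param) while another reads them during crash handling, so both sides must hold msm_gpu::lock. A userspace sketch of the rule, with pthread_mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *comm;	/* shared, guarded by `lock` */

static void set_comm(const char *s)
{
	char *str = strdup(s);

	pthread_mutex_lock(&lock);
	free(comm);	/* safe: no reader can hold a stale pointer */
	comm = str;
	pthread_mutex_unlock(&lock);
}

static char *get_comm(void)
{
	char *copy;

	pthread_mutex_lock(&lock);
	copy = comm ? strdup(comm) : NULL;	/* snapshot under the lock */
	pthread_mutex_unlock(&lock);
	return copy;
}

int main(void)
{
	char *c;

	set_comm("Xorg");
	c = get_comm();
	printf("comm: %s\n", c);
	free(c);
	free(comm);
	return 0;
}

The WARN_ON(!mutex_is_locked(...)) added in msm_gpu.c is the kernel's way of asserting the reader side of this rule at runtime.
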
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 86b28add1fff..2527afef9c19 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -47,15 +47,17 @@ struct msm_mdss {
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
struct msm_mdss *msm_mdss)
{
- struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
- struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+ struct icc_path *path0;
+ struct icc_path *path1;
+ path0 = of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
msm_mdss->path[0] = path0;
msm_mdss->num_paths = 1;
+ path1 = of_icc_get(dev, "mdp1-mem");
if (!IS_ERR_OR_NULL(path1)) {
msm_mdss->path[1] = path1;
msm_mdss->num_paths++;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
deleted file mode 100644
index e87de7906f78..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/screen_info.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/console.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_atomic.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_gem.h"
-#include "nouveau_bo.h"
-#include "nouveau_fbcon.h"
-#include "nouveau_chan.h"
-#include "nouveau_vmm.h"
-
-#include "nouveau_crtc.h"
-
-MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
-int nouveau_nofbaccel = 0;
-module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
-
-MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
-static int nouveau_fbcon_bpp;
-module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
-
-static void
-nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nvif_device *device = &drm->client.device;
- int ret;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- ret = -ENODEV;
- if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
- mutex_trylock(&drm->client.mutex)) {
- if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
- ret = nv04_fbcon_fillrect(info, rect);
- else
- if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
- ret = nv50_fbcon_fillrect(info, rect);
- else
- ret = nvc0_fbcon_fillrect(info, rect);
- mutex_unlock(&drm->client.mutex);
- }
-
- if (ret == 0)
- return;
-
- if (ret != -ENODEV)
- nouveau_fbcon_gpu_lockup(info);
- drm_fb_helper_cfb_fillrect(info, rect);
-}
-
-static void
-nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nvif_device *device = &drm->client.device;
- int ret;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- ret = -ENODEV;
- if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
- mutex_trylock(&drm->client.mutex)) {
- if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
- ret = nv04_fbcon_copyarea(info, image);
- else
- if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
- ret = nv50_fbcon_copyarea(info, image);
- else
- ret = nvc0_fbcon_copyarea(info, image);
- mutex_unlock(&drm->client.mutex);
- }
-
- if (ret == 0)
- return;
-
- if (ret != -ENODEV)
- nouveau_fbcon_gpu_lockup(info);
- drm_fb_helper_cfb_copyarea(info, image);
-}
-
-static void
-nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nvif_device *device = &drm->client.device;
- int ret;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- ret = -ENODEV;
- if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
- mutex_trylock(&drm->client.mutex)) {
- if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
- ret = nv04_fbcon_imageblit(info, image);
- else
- if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
- ret = nv50_fbcon_imageblit(info, image);
- else
- ret = nvc0_fbcon_imageblit(info, image);
- mutex_unlock(&drm->client.mutex);
- }
-
- if (ret == 0)
- return;
-
- if (ret != -ENODEV)
- nouveau_fbcon_gpu_lockup(info);
- drm_fb_helper_cfb_imageblit(info, image);
-}
-
-static int
-nouveau_fbcon_sync(struct fb_info *info)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nouveau_channel *chan = drm->channel;
- int ret;
-
- if (!chan || !chan->accel_done || in_interrupt() ||
- info->state != FBINFO_STATE_RUNNING ||
- info->flags & FBINFO_HWACCEL_DISABLED)
- return 0;
-
- if (!mutex_trylock(&drm->client.mutex))
- return 0;
-
- ret = nouveau_channel_idle(chan);
- mutex_unlock(&drm->client.mutex);
- if (ret) {
- nouveau_fbcon_gpu_lockup(info);
- return 0;
- }
-
- chan->accel_done = false;
- return 0;
-}
-
-static int
-nouveau_fbcon_open(struct fb_info *info, int user)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- int ret = pm_runtime_get_sync(drm->dev->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put(drm->dev->dev);
- return ret;
- }
- return 0;
-}
-
-static int
-nouveau_fbcon_release(struct fb_info *info, int user)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- pm_runtime_put(drm->dev->dev);
- return 0;
-}
-
-static const struct fb_ops nouveau_fbcon_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = nouveau_fbcon_open,
- .fb_release = nouveau_fbcon_release,
- .fb_fillrect = nouveau_fbcon_fillrect,
- .fb_copyarea = nouveau_fbcon_copyarea,
- .fb_imageblit = nouveau_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
-};
-
-static const struct fb_ops nouveau_fbcon_sw_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = nouveau_fbcon_open,
- .fb_release = nouveau_fbcon_release,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-void
-nouveau_fbcon_accel_save_disable(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon && drm->fbcon->helper.info) {
- drm->fbcon->saved_flags = drm->fbcon->helper.info->flags;
- drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
- }
-}
-
-void
-nouveau_fbcon_accel_restore(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon && drm->fbcon->helper.info)
- drm->fbcon->helper.info->flags = drm->fbcon->saved_flags;
-}
-
-static void
-nouveau_fbcon_accel_fini(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fbdev *fbcon = drm->fbcon;
- if (fbcon && drm->channel) {
- console_lock();
- if (fbcon->helper.info)
- fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
- console_unlock();
- nouveau_channel_idle(drm->channel);
- nvif_object_dtor(&fbcon->twod);
- nvif_object_dtor(&fbcon->blit);
- nvif_object_dtor(&fbcon->gdi);
- nvif_object_dtor(&fbcon->patt);
- nvif_object_dtor(&fbcon->rop);
- nvif_object_dtor(&fbcon->clip);
- nvif_object_dtor(&fbcon->surf2d);
- }
-}
-
-static void
-nouveau_fbcon_accel_init(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fbdev *fbcon = drm->fbcon;
- struct fb_info *info = fbcon->helper.info;
- int ret;
-
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
- ret = nv04_fbcon_accel_init(info);
- else
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
- ret = nv50_fbcon_accel_init(info);
- else
- ret = nvc0_fbcon_accel_init(info);
-
- if (ret == 0)
- info->fbops = &nouveau_fbcon_ops;
-}
-
-static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
- struct fb_info *info = fbcon->helper.info;
- struct fb_fillrect rect;
-
- /* Clear the entire fbcon. The drm will program every connector
- * with it's preferred mode. If the sizes differ, one display will
- * quite likely have garbage around the console.
- */
- rect.dx = rect.dy = 0;
- rect.width = info->var.xres_virtual;
- rect.height = info->var.yres_virtual;
- rect.color = 0;
- rect.rop = ROP_COPY;
- info->fbops->fb_fillrect(info, &rect);
-}
-
-static int
-nouveau_fbcon_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct nouveau_fbdev *fbcon =
- container_of(helper, struct nouveau_fbdev, helper);
- struct drm_device *dev = fbcon->helper.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->client.device;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct nouveau_channel *chan;
- struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd2 mode_cmd = {};
- int ret;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
- mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
- mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, &nvbo);
- if (ret) {
- NV_ERROR(drm, "failed to allocate framebuffer\n");
- goto out;
- }
-
- ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
- if (ret)
- goto out_unref;
-
- ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (ret) {
- NV_ERROR(drm, "failed to pin fb: %d\n", ret);
- goto out_unref;
- }
-
- ret = nouveau_bo_map(nvbo);
- if (ret) {
- NV_ERROR(drm, "failed to map fb: %d\n", ret);
- goto out_unpin;
- }
-
- chan = nouveau_nofbaccel ? NULL : drm->channel;
- if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
- if (ret) {
- NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
- chan = NULL;
- }
- }
-
- info = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out_unlock;
- }
-
- /* setup helper */
- fbcon->helper.fb = fb;
-
- if (!chan)
- info->flags = FBINFO_HWACCEL_DISABLED;
- else
- info->flags = FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_IMAGEBLIT;
- info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.resource->bus.offset;
- info->fix.smem_len = nvbo->bo.base.size;
-
- info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
- info->screen_size = nvbo->bo.base.size;
-
- drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
- if (chan)
- nouveau_fbcon_accel_init(dev);
- nouveau_fbcon_zfill(dev, fbcon);
-
- /* To allow resizeing without swapping buffers */
- NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- fb->width, fb->height, nvbo->offset, nvbo);
-
- if (dev_is_pci(dev->dev))
- vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
-
- return 0;
-
-out_unlock:
- if (chan)
- nouveau_vma_del(&fbcon->vma);
- nouveau_bo_unmap(nvbo);
-out_unpin:
- nouveau_bo_unpin(nvbo);
-out_unref:
- nouveau_bo_ref(NULL, &nvbo);
-out:
- return ret;
-}
-
-static int
-nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
- struct drm_framebuffer *fb = fbcon->helper.fb;
- struct nouveau_bo *nvbo;
-
- drm_fb_helper_unregister_info(&fbcon->helper);
- drm_fb_helper_fini(&fbcon->helper);
-
- if (fb && fb->obj[0]) {
- nvbo = nouveau_gem_object(fb->obj[0]);
- nouveau_vma_del(&fbcon->vma);
- nouveau_bo_unmap(nvbo);
- nouveau_bo_unpin(nvbo);
- drm_framebuffer_put(fb);
- }
-
- return 0;
-}
-
-void nouveau_fbcon_gpu_lockup(struct fb_info *info)
-{
- struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-
- NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
-}
-
-static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .fb_probe = nouveau_fbcon_create,
-};
-
-static void
-nouveau_fbcon_set_suspend_work(struct work_struct *work)
-{
- struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
- int state = READ_ONCE(drm->fbcon_new_state);
-
- if (state == FBINFO_STATE_RUNNING)
- pm_runtime_get_sync(drm->dev->dev);
-
- console_lock();
- if (state == FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_restore(drm->dev);
- drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
- if (state != FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_save_disable(drm->dev);
- console_unlock();
-
- if (state == FBINFO_STATE_RUNNING) {
- nouveau_fbcon_hotplug_resume(drm->fbcon);
- pm_runtime_mark_last_busy(drm->dev->dev);
- pm_runtime_put_autosuspend(drm->dev->dev);
- }
-}
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (!drm->fbcon)
- return;
-
- drm->fbcon_new_state = state;
- /* Since runtime resume can happen as a result of a sysfs operation,
- * it's possible we already have the console locked. So handle fbcon
- * init/deinit from a separate work thread.
- */
- schedule_work(&drm->fbcon_work);
-}
-
-void
-nouveau_fbcon_output_poll_changed(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fbdev *fbcon = drm->fbcon;
- int ret;
-
- if (!fbcon)
- return;
-
- mutex_lock(&fbcon->hotplug_lock);
-
- ret = pm_runtime_get(dev->dev);
- if (ret == 1 || ret == -EACCES) {
- drm_fb_helper_hotplug_event(&fbcon->helper);
-
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- } else if (ret == 0) {
- /* If the GPU was already in the process of suspending before
- * this event happened, then we can't block here as we'll
- * deadlock the runtime pmops since they wait for us to
- * finish. So, just defer this event for when we runtime
- * resume again. It will be handled by fbcon_work.
- */
- NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
- fbcon->hotplug_waiting = true;
- pm_runtime_put_noidle(drm->dev->dev);
- } else {
- DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
- ret);
- }
-
- mutex_unlock(&fbcon->hotplug_lock);
-}
-
-void
-nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
-{
- struct nouveau_drm *drm;
-
- if (!fbcon)
- return;
- drm = nouveau_drm(fbcon->helper.dev);
-
- mutex_lock(&fbcon->hotplug_lock);
- if (fbcon->hotplug_waiting) {
- fbcon->hotplug_waiting = false;
-
- NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
- drm_fb_helper_hotplug_event(&fbcon->helper);
- }
- mutex_unlock(&fbcon->hotplug_lock);
-}
-
-int
-nouveau_fbcon_init(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fbdev *fbcon;
- int preferred_bpp = nouveau_fbcon_bpp;
- int ret;
-
- if (!dev->mode_config.num_crtc ||
- (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
- return 0;
-
- fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
- if (!fbcon)
- return -ENOMEM;
-
- drm->fbcon = fbcon;
- INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
- mutex_init(&fbcon->hotplug_lock);
-
- drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
-
- ret = drm_fb_helper_init(dev, &fbcon->helper);
- if (ret)
- goto free;
-
- if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
- if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
- preferred_bpp = 8;
- else
- if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
- preferred_bpp = 16;
- else
- preferred_bpp = 32;
- }
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
- if (ret)
- goto fini;
-
- if (fbcon->helper.info)
- fbcon->helper.info->pixmap.buf_align = 4;
- return 0;
-
-fini:
- drm_fb_helper_fini(&fbcon->helper);
-free:
- kfree(fbcon);
- drm->fbcon = NULL;
- return ret;
-}
-
-void
-nouveau_fbcon_fini(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- if (!drm->fbcon)
- return;
-
- drm_kms_helper_poll_fini(dev);
- nouveau_fbcon_accel_fini(dev);
- nouveau_fbcon_destroy(dev, drm->fbcon);
- kfree(drm->fbcon);
- drm->fbcon = NULL;
-}
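
Worth preserving from the deleted code is its suspend-work pattern: runtime resume can arrive via sysfs with console_lock already held, so the fbdev suspend state is flipped from a worker rather than directly in the PM path. A minimal sketch of that pattern, with hypothetical mydrv_* names (only the console and drm_fb_helper calls are real APIs):

/*
 * Deferred-suspend sketch; mydrv_* names are placeholders, not nouveau's.
 */
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/workqueue.h>
#include <drm/drm_fb_helper.h>

struct mydrv {
	struct drm_fb_helper helper;
	struct work_struct suspend_work;
	int new_state;		/* FBINFO_STATE_RUNNING or _SUSPENDED */
};

static void mydrv_suspend_work(struct work_struct *work)
{
	struct mydrv *drv = container_of(work, struct mydrv, suspend_work);
	int state = READ_ONCE(drv->new_state);

	/* A worker may take console_lock safely; the runtime-PM caller
	 * that scheduled us might already have been holding it. */
	console_lock();
	drm_fb_helper_set_suspend(&drv->helper,
				  state != FBINFO_STATE_RUNNING);
	console_unlock();
}

static void mydrv_set_suspend(struct mydrv *drv, int state)
{
	WRITE_ONCE(drv->new_state, state);
	schedule_work(&drv->suspend_work);	/* never console_lock() here */
}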
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
index 079600328be1..e6403a9d66ad 100644
--- a/drivers/gpu/drm/panfrost/Kconfig
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -3,7 +3,8 @@
config DRM_PANFROST
tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
depends on DRM
- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
depends on MMU
select DRM_SCHED
select IOMMU_SUPPORT
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index fa619fe72086..abb0dadd8f63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
struct panfrost_gem_mapping *mapping;
+ int ret;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
!(args->flags & PANFROST_BO_NOEXEC))
return -EINVAL;
- bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
- &args->handle);
+ bo = panfrost_gem_create(dev, args->size, args->flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (ret)
+ goto out;
+
mapping = panfrost_gem_mapping_get(bo, priv);
- if (!mapping) {
- drm_gem_object_put(&bo->base.base);
- return -EINVAL;
+ if (mapping) {
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
+ } else {
+ /* This can only happen if the handle from
+ * drm_gem_handle_create() has already been guessed and freed
+ * by user space
+ */
+ ret = -EINVAL;
}
- args->offset = mapping->mmnode.start << PAGE_SHIFT;
- panfrost_gem_mapping_put(mapping);
-
- return 0;
+out:
+ drm_gem_object_put(&bo->base.base);
+ return ret;
}
/**
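
The reworked ioctl separates object creation from handle creation and makes the reference flow explicit: the creation reference is dropped exactly once at the end, after the last dereference, so userspace guessing and closing the fresh handle can no longer race the driver into touching a freed mapping; it just gets -EINVAL. A hedged sketch of that flow (my_* names are placeholders; drm_gem_handle_create()/drm_gem_object_put() are the real DRM calls):

#include <linux/err.h>
#include <drm/drm_gem.h>

struct drm_gem_object *my_gem_create(struct drm_device *dev, size_t size,
				     u32 flags);	/* hypothetical */
int my_fill_return_args(struct drm_gem_object *obj);	/* hypothetical */

static int my_create_bo(struct drm_device *dev, struct drm_file *file,
			size_t size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = my_gem_create(dev, size, flags);	/* we own one reference */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Publishing a handle gives userspace its own reference; a racing
	 * GEM_CLOSE may drop it (and tear down mappings) at any time. */
	ret = drm_gem_handle_create(file, obj, handle);
	if (ret)
		goto out;

	ret = my_fill_return_args(obj);	/* relies on *our* reference */
out:
	drm_gem_object_put(obj);	/* drop the creation ref exactly once */
	return ret;
}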
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 293e799e2fe8..3c812fbd126f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -235,12 +235,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
}
struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- u32 flags,
- uint32_t *handle)
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
- int ret;
struct drm_gem_shmem_object *shmem;
struct panfrost_gem_object *bo;
@@ -256,16 +252,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
bo->is_heap = !!(flags & PANFROST_BO_HEAP);
- /*
- * Allocate an id of idr table where the obj is registered
- * and handle has the id what user can see.
- */
- ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_put(&shmem->base);
- if (ret)
- return ERR_PTR(ret);
-
return bo;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 8088d5fd8480..ad2877eeeccd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- u32 flags,
- uint32_t *handle);
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index aaf357e29c65..bca726a8f483 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -18,3 +18,5 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
drm_rect_test.o
+
+CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 89f12d3b4a21..186b28dc7038 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -298,9 +298,9 @@ static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct dr
return false;
}
-static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
- unsigned int count,
- u64 size)
+static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
+ unsigned int count,
+ u64 size)
{
const struct boundary {
u64 start, size;
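
Both tweaks target stack usage in the KUnit build: the structleak GCC plugin force-initializes on-stack structures, and inlining helpers with large locals into one test function can trip the frame-size warning. noinline_for_stack (an alias for noinline from <linux/compiler.h>) keeps such a helper's frame separate. A hedged illustration with a hypothetical helper:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Hypothetical helper: its large locals stay in their own frame instead
 * of being inlined (and force-initialized by structleak) into the
 * caller's, keeping the caller under the frame-size warning limit.
 */
static noinline_for_stack int check_one_boundary(u64 start, u64 size)
{
	struct { u64 start, size; u8 scratch[256]; } b = { start, size, {} };

	/* b.start + b.size must not wrap around for a valid boundary */
	return b.start + b.size >= b.start ? 0 : -ERANGE;
}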
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fca8b0e0e30a..7635d7d6b13b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -184,7 +184,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
- ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
+ ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
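
The fix presumably avoids dereferencing ttm for the page count, since the preceding condition allows ttm to be NULL here; dst_mem->size is always valid. PFN_UP() simply rounds a byte count up to whole pages:

#include <linux/pfn.h>	/* PFN_UP() */

/*
 * PFN_UP(1) == 1, PFN_UP(PAGE_SIZE) == 1, PFN_UP(PAGE_SIZE + 1) == 2,
 * so PFN_UP(dst_mem->size) yields the page count without touching ttm.
 */
static unsigned long pages_to_copy(size_t bytes)
{
	return PFN_UP(bytes);
}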
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index c2b7573bd92b..86d629e45307 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -179,6 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
+ mutex_destroy(&bo->madv_lock);
drm_gem_dma_free(&bo->base);
}
@@ -394,7 +395,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
- int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
@@ -406,9 +406,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
- ret = drmm_mutex_init(dev, &bo->madv_lock);
- if (ret)
- return ERR_PTR(ret);
+ mutex_init(&bo->madv_lock);
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
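
The vc4 change moves the madvise lock from device-managed to object-managed lifetime: drmm_mutex_init() would presumably register a cleanup action on the drm_device for every BO ever created, while a plain mutex_init()/mutex_destroy() pair is bounded by the BO itself. The shape of that pairing, with a hypothetical my_bo:

#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical object whose lock lives exactly as long as the object. */
struct my_bo {
	struct mutex madv_lock;
	int madv;
};

static struct my_bo *my_bo_create(void)
{
	struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;
	mutex_init(&bo->madv_lock);	/* per-object, no device-managed action */
	return bo;
}

static void my_bo_destroy(struct my_bo *bo)
{
	mutex_destroy(&bo->madv_lock);	/* flags late users under DEBUG_MUTEXES */
	kfree(bo);
}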
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5d05093014ac..9f4a90493aea 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
return ret;
}
- drm_gem_object_put(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
+
+ /*
+ * The handle owns the reference now. But we must drop our
+ * remaining reference *after* we no longer need to dereference
+ * the obj. Otherwise userspace could guess the handle and
+ * race closing it from another thread.
+ */
+ drm_gem_object_put(obj);
+
return 0;
}
@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
drm_gem_object_release(obj);
return ret;
}
- drm_gem_object_put(obj);
rc_blob->res_handle = bo->hw_res_handle;
rc_blob->bo_handle = handle;
+ /*
+ * The handle owns the reference now. But we must drop our
+ * remaining reference *after* we no longer need to dereference
+ * the obj. Otherwise userspace could guess the handle and
+ * race closing it from another thread.
+ */
+ drm_gem_object_put(obj);
+
return 0;
}
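
Both hunks enforce the same rule, condensed below as a hedged sketch (only the drm_gem_* calls are real; the surrounding function is illustrative):

#include <drm/drm_gem.h>

static int publish_handle_last(struct drm_file *file,
			       struct drm_gem_object *obj, u32 *out_handle)
{
	int ret = drm_gem_handle_create(file, obj, out_handle);

	if (ret)
		return ret;	/* caller keeps its reference and cleans up */

	/* ...use obj here (e.g. copy hw_res_handle into the reply)... */

	/*
	 * Only now drop our reference: a racing GEM_CLOSE on the
	 * (guessable) handle may free obj the moment ours is gone.
	 */
	drm_gem_object_put(obj);
	return 0;
}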
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 8d7728181de0..c7e74cf13022 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -184,7 +184,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- struct virtio_gpu_mem_entry *ents;
+ struct virtio_gpu_mem_entry *ents = NULL;
unsigned int nents;
int ret;
@@ -210,7 +210,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
ret = -ENOMEM;
objs = virtio_gpu_array_alloc(1);
if (!objs)
- goto err_put_id;
+ goto err_free_entry;
virtio_gpu_array_add_obj(objs, &bo->base.base);
ret = virtio_gpu_array_lock_resv(objs);
@@ -239,6 +239,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
err_put_objs:
virtio_gpu_array_put_free(objs);
+err_free_entry:
+ kvfree(ents);
err_put_id:
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
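
The leak fix is standard goto unwinding: ents is NULL-initialized so the new err_free_entry label can free it unconditionally, and labels run in reverse allocation order. A generic sketch of the shape (allocation sizes and names are placeholders):

#include <linux/errno.h>
#include <linux/mm.h>	/* kvmalloc()/kvfree() */

static int my_create(void)
{
	void *ents = NULL;	/* mirrors the hunk's ents = NULL init */
	void *objs;
	int ret = -ENOMEM;

	ents = kvmalloc(4096, GFP_KERNEL);
	if (!ents)
		return -ENOMEM;

	objs = kvmalloc(4096, GFP_KERNEL);
	if (!objs)
		goto err_free_ents;	/* labels unwind newest-first */

	kvfree(objs);
	kvfree(ents);
	return 0;

err_free_ents:
	kvfree(ents);	/* kvfree(NULL) would also be a safe no-op */
	return ret;
}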
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 932b125ebf3d..ddf8373c1d77 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -254,40 +254,6 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
kref_put(&base->refcount, ttm_release_base);
}
-/**
- * ttm_base_object_noref_lookup - look up a base object without reference
- * @tfile: The struct ttm_object_file the object is registered with.
- * @key: The object handle.
- *
- * This function looks up a ttm base object and returns a pointer to it
- * without refcounting the pointer. The returned pointer is only valid
- * until ttm_base_object_noref_release() is called, and the object
- * pointed to by the returned pointer may be doomed. Any persistent usage
- * of the object requires a refcount to be taken using kref_get_unless_zero().
- * Iff this function returns successfully it needs to be paired with
- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
- * may be called in between these function calls.
- *
- * Return: A pointer to the object if successful or NULL otherwise.
- */
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
-{
- struct vmwgfx_hash_item *hash;
- int ret;
-
- rcu_read_lock();
- ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
- if (ret) {
- rcu_read_unlock();
- return NULL;
- }
-
- __release(RCU);
- return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
-}
-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
-
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint64_t key)
{
@@ -295,15 +261,16 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
struct vmwgfx_hash_item *hash;
int ret;
- rcu_read_lock();
- ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
+ spin_lock(&tfile->lock);
+ ret = ttm_tfile_find_ref(tfile, key, &hash);
if (likely(ret == 0)) {
base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
- rcu_read_unlock();
+ spin_unlock(&tfile->lock);
+
return base;
}
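
With the RCU-based noref lookup gone, the remaining lookup takes tfile->lock and uses kref_get_unless_zero() so it can never return an object whose final reference has already been dropped. A hedged sketch of that lookup-with-refcount pattern, with hypothetical my_* names (the real code uses tfile->lock and ttm_tfile_find_ref()):

#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_obj {
	struct kref refcount;
};

struct my_table {
	spinlock_t lock;
};

struct my_obj *my_find(struct my_table *tbl, u64 key);	/* hash walk */

static struct my_obj *my_lookup(struct my_table *tbl, u64 key)
{
	struct my_obj *obj;

	spin_lock(&tbl->lock);
	obj = my_find(tbl, key);
	/* An object past its final kref_put() may still be visible until
	 * the releaser unhashes it; refuse to resurrect it. */
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;
	spin_unlock(&tbl->lock);

	return obj;	/* the caller now owns a reference, or got NULL */
}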
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 95a9679f9d39..e6b77ee33e55 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -309,21 +309,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
-
-/**
- * ttm_base_object_noref_release - release a base object pointer looked up
- * without reference
- *
- * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
- */
-static inline void ttm_base_object_noref_release(void)
-{
- __acquire(RCU);
- rcu_read_unlock();
-}
-
static inline int ttm_bo_wait(struct ttm_buffer_object *bo, bool intr,
bool no_wait)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 321c551784a1..aa1cd5126a32 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -716,44 +716,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
}
/**
- * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @filp: The TTM object file the handle is registered with.
- * @handle: The user buffer object handle.
- *
- * This function looks up a struct vmw_bo and returns a pointer to the
- * struct vmw_buffer_object it derives from without refcounting the pointer.
- * The returned pointer is only valid until vmw_user_bo_noref_release() is
- * called, and the object pointed to by the returned pointer may be doomed.
- * Any persistent usage of the object requires a refcount to be taken using
- * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
- * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called in between these function calls.
- *
- * Return: A struct vmw_buffer_object pointer if successful or negative
- * error pointer on failure.
- */
-struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
-{
- struct vmw_buffer_object *vmw_bo;
- struct ttm_buffer_object *bo;
- struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
-
- if (!gobj) {
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return ERR_PTR(-ESRCH);
- }
- vmw_bo = gem_to_vmw_bo(gobj);
- bo = ttm_bo_get_unless_zero(&vmw_bo->base);
- vmw_bo = vmw_buffer_object(bo);
- drm_gem_object_put(gobj);
-
- return vmw_bo;
-}
-
-
-/**
* vmw_bo_fence_single - Utility function to fence a single TTM buffer
* object without unreserving it.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4b612fc9758c..203fa32cd4c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -832,12 +832,7 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- const struct vmw_user_resource_conv *
- converter);
+
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -877,15 +872,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
}
/**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
- */
-static inline void vmw_user_resource_noref_release(void)
-{
- ttm_base_object_noref_release();
-}
-
-/**
* Buffer object helper functions - vmwgfx_bo.c
*/
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
@@ -936,8 +922,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 43cec8e37e4d..9359e8dfbac2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -290,20 +290,26 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
rcache->valid_handle = 0;
}
+enum vmw_val_add_flags {
+ vmw_val_add_flag_none = 0,
+ vmw_val_add_flag_noctx = 1 << 0,
+};
+
/**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
- * rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_val_add - Add a resource to the validation list.
*
* @sw_context: Pointer to the software context.
- * @res: Unreferenced rcu-protected pointer to the resource.
+ * @res: Pointer to the resource (the caller holds a reference).
* @dirty: Whether to change dirty status.
+ * @flags: Whether to skip context setup for the resource
+ * (vmw_val_add_flag_noctx) or perform it (vmw_val_add_flag_none).
*
* Returns: 0 on success. Negative error code on failure. Typical error codes
* are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
*/
-static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- u32 dirty)
+static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res,
+ u32 dirty,
+ u32 flags)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -318,24 +324,30 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
if (dirty)
vmw_validation_res_set_dirty(sw_context->ctx,
rcache->private, dirty);
- vmw_user_resource_noref_release();
return 0;
}
- priv_size = vmw_execbuf_res_size(dev_priv, res_type);
- ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
- dirty, (void **)&ctx_info,
- &first_usage);
- vmw_user_resource_noref_release();
- if (ret)
- return ret;
+ if ((flags & vmw_val_add_flag_noctx) != 0) {
+ ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+ (void **)&ctx_info, NULL);
+ if (ret)
+ return ret;
- if (priv_size && first_usage) {
- ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
- ctx_info);
- if (ret) {
- VMW_DEBUG_USER("Failed first usage context setup.\n");
+ } else {
+ priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+ dirty, (void **)&ctx_info,
+ &first_usage);
+ if (ret)
return ret;
+
+ if (priv_size && first_usage) {
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+ ctx_info);
+ if (ret) {
+ VMW_DEBUG_USER("Failed first usage context setup.\n");
+ return ret;
+ }
}
}
@@ -344,43 +356,6 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
}
/**
- * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
- * validation list if it's not already on it
- *
- * @sw_context: Pointer to the software context.
- * @res: Pointer to the resource.
- * @dirty: Whether to change dirty status.
- *
- * Returns: Zero on success. Negative error code on failure.
- */
-static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- u32 dirty)
-{
- struct vmw_res_cache_entry *rcache;
- enum vmw_res_type res_type = vmw_res_type(res);
- void *ptr;
- int ret;
-
- rcache = &sw_context->res_cache[res_type];
- if (likely(rcache->valid && rcache->res == res)) {
- if (dirty)
- vmw_validation_res_set_dirty(sw_context->ctx,
- rcache->private, dirty);
- return 0;
- }
-
- ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
- &ptr, NULL);
- if (ret)
- return ret;
-
- vmw_execbuf_rcache_update(rcache, res, ptr);
-
- return 0;
-}
-
-/**
* vmw_view_res_val_add - Add a view and the surface it's pointing to to the
* validation list
*
@@ -398,13 +373,13 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
* First add the resource the view is pointing to, otherwise it may be
* swapped out when the view is validated.
*/
- ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
- vmw_view_dirtying(view));
+ ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
+ vmw_view_dirtying(view), vmw_val_add_flag_noctx);
if (ret)
return ret;
- return vmw_execbuf_res_noctx_val_add(sw_context, view,
- VMW_RES_DIRTY_NONE);
+ return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
+ vmw_val_add_flag_noctx);
}
/**
@@ -475,8 +450,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
- VMW_RES_DIRTY_SET);
+ ret = vmw_execbuf_res_val_add(sw_context, res,
+ VMW_RES_DIRTY_SET,
+ vmw_val_add_flag_noctx);
if (unlikely(ret != 0))
return ret;
}
@@ -490,9 +466,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_execbuf_res_noctx_val_add
- (sw_context, entry->res,
- vmw_binding_dirtying(entry->bt));
+ ret = vmw_execbuf_res_val_add(sw_context, entry->res,
+ vmw_binding_dirtying(entry->bt),
+ vmw_val_add_flag_noctx);
if (unlikely(ret != 0))
break;
}
@@ -658,7 +634,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
{
struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
struct vmw_resource *res;
- int ret;
+ int ret = 0;
+ bool needs_unref = false;
if (p_res)
*p_res = NULL;
@@ -683,17 +660,18 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (ret)
return ret;
- res = vmw_user_resource_noref_lookup_handle
- (dev_priv, sw_context->fp->tfile, *id_loc, converter);
- if (IS_ERR(res)) {
+ ret = vmw_user_resource_lookup_handle
+ (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
+ if (ret != 0) {
VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
(unsigned int) *id_loc);
- return PTR_ERR(res);
+ return ret;
}
+ needs_unref = true;
- ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
+ ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
if (unlikely(ret != 0))
- return ret;
+ goto res_check_done;
if (rcache->valid && rcache->res == res) {
rcache->valid_handle = true;
@@ -708,7 +686,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (p_res)
*p_res = res;
- return 0;
+res_check_done:
+ if (needs_unref)
+ vmw_resource_unreference(&res);
+
+ return ret;
}
/**
@@ -1171,9 +1153,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
- if (IS_ERR(vmw_bo)) {
- VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
- return PTR_ERR(vmw_bo);
+ return ret;
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
@@ -1225,9 +1207,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
- if (IS_ERR(vmw_bo)) {
- VMW_DEBUG_USER("Could not find or use GMR region.\n");
+ ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+ if (ret != 0) {
+ drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
- return PTR_ERR(vmw_bo);
+ return ret;
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
@@ -2025,8 +2007,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, cmd->body.type);
if (!IS_ERR(res)) {
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
- VMW_RES_DIRTY_NONE);
+ ret = vmw_execbuf_res_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE,
+ vmw_val_add_flag_noctx);
if (unlikely(ret != 0))
return ret;
@@ -2273,8 +2256,9 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
- VMW_RES_DIRTY_NONE);
+ ret = vmw_execbuf_res_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE,
+ vmw_val_add_flag_noctx);
if (ret)
return ret;
}
@@ -2777,8 +2761,8 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
- VMW_RES_DIRTY_NONE);
+ ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+ vmw_val_add_flag_noctx);
if (ret) {
VMW_DEBUG_USER("Error creating resource validation node.\n");
return ret;
@@ -3098,8 +3082,8 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
- VMW_RES_DIRTY_NONE);
+ ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+ vmw_val_add_flag_noctx);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
return ret;
@@ -3148,8 +3132,8 @@ static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
return 0;
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
- VMW_RES_DIRTY_NONE);
+ ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+ vmw_val_add_flag_noctx);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
return ret;
@@ -4066,22 +4050,26 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
if (ret)
return ret;
- res = vmw_user_resource_noref_lookup_handle
+ ret = vmw_user_resource_lookup_handle
(dev_priv, sw_context->fp->tfile, handle,
- user_context_converter);
- if (IS_ERR(res)) {
+ user_context_converter, &res);
+ if (ret != 0) {
VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
(unsigned int) handle);
- return PTR_ERR(res);
+ return ret;
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
- if (unlikely(ret != 0))
+ ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
+ vmw_val_add_flag_none);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
return ret;
+ }
sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
sw_context->man = vmw_context_res_man(res);
+ vmw_resource_unreference(&res);
return 0;
}
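
The execbuf rework replaces the noref/noctx helper pair with one function selected by a flag, and every caller now does a referenced lookup, adds the resource to the validation list (which holds its own reference), and drops the lookup reference. A simplified sketch of that shape (my_* names are placeholders, not the vmwgfx API):

#include <linux/types.h>

enum my_val_add_flags {
	my_val_add_flag_none  = 0,
	my_val_add_flag_noctx = 1 << 0,	/* skip per-context first-use setup */
};

struct my_ctx;
struct my_res;
int my_lookup_handle(struct my_ctx *sw, u32 handle, struct my_res **res);
int my_val_add(struct my_ctx *sw, struct my_res *res, u32 dirty, u32 flags);
void my_res_unreference(struct my_res **res);

static int my_res_check(struct my_ctx *sw, u32 handle, u32 dirty)
{
	struct my_res *res;
	int ret;

	ret = my_lookup_handle(sw, handle, &res);	/* takes a reference */
	if (ret)
		return ret;

	/* The validation list takes its own reference on success... */
	ret = my_val_add(sw, res, dirty, my_val_add_flag_none);

	/* ...so the lookup reference is dropped either way. */
	my_res_unreference(&res);
	return ret;
}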
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
index 4f40167ad61f..4f40167ad61f 100755..100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index f66caa540e14..c7d645e5ec7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -281,39 +281,6 @@ out_bad_resource:
return ret;
}
-/**
- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
- * TTM user-space handle and perform basic type checks
- *
- * @dev_priv: Pointer to a device private struct
- * @tfile: Pointer to a struct ttm_object_file identifying the caller
- * @handle: The TTM user-space handle
- * @converter: Pointer to an object describing the resource type
- *
- * If the handle can't be found or is associated with an incorrect resource
- * type, -EINVAL will be returned.
- */
-struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- const struct vmw_user_resource_conv
- *converter)
-{
- struct ttm_base_object *base;
-
- base = ttm_base_object_noref_lookup(tfile, handle);
- if (!base)
- return ERR_PTR(-ESRCH);
-
- if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
- ttm_base_object_noref_release();
- return ERR_PTR(-EINVAL);
- }
-
- return converter->base_obj_to_res(base);
-}
-
/*
* Helper function that looks up either a surface or a bo.
*
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 0d8e6bd1ccbf..90996c108146 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -717,7 +717,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
-static int xen_drv_remove(struct xenbus_device *dev)
+static void xen_drv_remove(struct xenbus_device *dev)
{
struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
int to = 100;
@@ -751,7 +751,6 @@ static int xen_drv_remove(struct xenbus_device *dev)
xen_drm_drv_fini(front_info);
xenbus_frontend_closed(dev);
- return 0;
}
static const struct xenbus_device_id xen_driver_ids[] = {
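
This matches the xenbus API change making bus remove() callbacks return void, since the returned value was ignored anyway. A hedged sketch of a frontend driver under the new signature (mydrv_* names are placeholders; xenbus_frontend_closed() is the real call):

#include <xen/xenbus.h>

struct mydrv_info;				/* hypothetical */
void mydrv_fini(struct mydrv_info *info);	/* hypothetical */
extern const struct xenbus_device_id mydrv_ids[];

static void mydrv_remove(struct xenbus_device *dev)
{
	struct mydrv_info *info = dev_get_drvdata(&dev->dev);

	mydrv_fini(info);		/* errors here can only be logged; */
	xenbus_frontend_closed(dev);	/* there is no caller to return to */
}

static struct xenbus_driver mydrv_driver = {
	.ids = mydrv_ids,
	.remove = mydrv_remove,
	/* .probe etc. elided */
};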