author		Chris Wilson <chris@chris-wilson.co.uk>		2020-11-05 15:49:34 +0000
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>		2020-11-12 19:47:30 -0500
commit		0eb0feb9aeac392edf01b525a54acde9b002312e (patch)
tree		778c585077cd31fd5150a06dbc4b983d29541fbd /drivers/gpu/drm/i915/gem/i915_gem_phys.c
parent		0a1db6f0841288274f0d1e3a8fa8a3a787e05633 (diff)
drm/i915/gem: Pull phys pread/pwrite implementations to the backend
Move the specialised interactions with the physical GEM object from the
pread/pwrite ioctl handler into the phys backend.

Currently, if one is able to exhaust the entire aperture and then try to
pwrite into an object not backed by struct page, we accidentally invoked
the phys pwrite handler on a non-phys object; calamitous.

Fixes: c6790dc22312 ("drm/i915: Wean off drm_pci_alloc/drm_pci_free")
Testcase: igt/gem_pwrite/exhaustion
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Cc: stable@vger.kernel.org
Link: https://patchwork.freedesktop.org/patch/msgid/20201105154934.16022-2-chris@chris-wilson.co.uk
(cherry picked from commit 852e1b3644817f071427b83859b889c788a0cf69)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
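For context, the other half of this commit lives in i915_gem.c (outside this
diffstat): the pread/pwrite ioctl handlers consult the object's backend ops
before touching any of the generic paths. The sketch below shows only that
dispatch pattern; the function name i915_gem_pwrite_ioctl_sketch and the
helper generic_pwrite_fallback are hypothetical stand-ins, and locking plus
the real fallback logic are elided, so treat it as an illustration rather
than a verbatim quote of the upstream handler.

static int
i915_gem_pwrite_ioctl_sketch(struct drm_i915_gem_object *obj,
			     const struct drm_i915_gem_pwrite *args)
{
	int ret;

	/* Try the backend-specific implementation first... */
	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		return ret;

	/* ...and only fall back to the generic GTT/shmem paths otherwise. */
	return generic_pwrite_fallback(obj, args);
}

With the ops hook wired up below, a phys-backed object can no longer be
mistaken for one of the generic backends by the ioctl layer.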
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_phys.c')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_phys.c	55
1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 28147aab47b9..3a4dfe2ef1da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -134,6 +134,58 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			  vaddr, dma);
 }
 
+static int
+phys_pwrite(struct drm_i915_gem_object *obj,
+	    const struct drm_i915_gem_pwrite *args)
+{
+	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+	int err;
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	/*
+	 * We manually control the domain here and pretend that it
+	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+	 */
+	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+
+	if (copy_from_user(vaddr, user_data, args->size))
+		return -EFAULT;
+
+	drm_clflush_virt_range(vaddr, args->size);
+	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+	return 0;
+}
+
+static int
+phys_pread(struct drm_i915_gem_object *obj,
+	   const struct drm_i915_gem_pread *args)
+{
+	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+	int err;
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	drm_clflush_virt_range(vaddr, args->size);
+	if (copy_to_user(user_data, vaddr, args->size))
+		return -EFAULT;
+
+	return 0;
+}
+
 static void phys_release(struct drm_i915_gem_object *obj)
 {
 	fput(obj->base.filp);
@@ -144,6 +196,9 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.get_pages = i915_gem_object_get_pages_phys,
 	.put_pages = i915_gem_object_put_pages_phys,
 
+	.pread = phys_pread,
+	.pwrite = phys_pwrite,
+
 	.release = phys_release,
 };
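Because the new hooks slot in behind the existing ioctls, there is no uAPI
change: userspace keeps issuing DRM_IOCTL_I915_GEM_PWRITE as before. A
minimal sketch of the caller side, assuming fd is an open i915 DRM fd and
handle names a phys-backed GEM object; gem_pwrite here is a hypothetical
helper, not a libdrm function:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* struct drm_i915_gem_pwrite, DRM_IOCTL_I915_GEM_PWRITE */

/* Hypothetical helper: copy 'size' bytes into a GEM object at 'offset'. */
static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
		      const void *data, uint64_t size)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uintptr_t)data;

	/* For a phys-backed object the kernel now lands in phys_pwrite(). */
	return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

On failure the ioctl returns -1 with errno set; an -EFAULT from the kernel's
copy_from_user() surfaces to the caller as errno == EFAULT.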