summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/i915/gem/i915_gem_phys.c
diff options
context:
space:
mode:
authorMaarten Lankhorst <maarten.lankhorst@linux.intel.com>2021-03-23 16:49:57 +0100
committerDaniel Vetter <daniel.vetter@ffwll.ch>2021-03-24 11:50:21 +0100
commita61170975718d56a8aa8d53c1e11e8b4e70b42f3 (patch)
treee705f9e01f1490f8e2c7709930eb3150f217638e /drivers/gpu/drm/i915/gem/i915_gem_phys.c
parentc471748dc742c207a5461be924538c286d66be3e (diff)
drm/i915: Rework struct phys attachment handling
Instead of creating a separate object type, we make changes to the shmem type, to clear struct page backing. This will allow us to ensure we never run into a race when we exchange obj->ops with other function pointers. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-9-maarten.lankhorst@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_phys.c')
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c102
1 file changed, 49 insertions, 53 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index d1bf543d111a..ed283e168f2f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -76,6 +76,8 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ /* We're no longer struct page backed */
+ obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
@@ -89,7 +91,7 @@ err_pci:
return -ENOMEM;
}
-static void
+void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -134,9 +136,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
vaddr, dma);
}
-static int
-phys_pwrite(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pwrite *args)
+int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
@@ -165,9 +166,8 @@ phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-static int
-phys_pread(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pread *args)
+int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
@@ -186,86 +186,82 @@ phys_pread(struct drm_i915_gem_object *obj,
return 0;
}
-static void phys_release(struct drm_i915_gem_object *obj)
+static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
- fput(obj->base.filp);
-}
+ struct sg_table *pages;
+ int err;
-static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
- .name = "i915_gem_object_phys",
- .get_pages = i915_gem_object_get_pages_phys,
- .put_pages = i915_gem_object_put_pages_phys,
+ pages = __i915_gem_object_unset_pages(obj);
+
+ err = i915_gem_object_get_pages_phys(obj);
+ if (err)
+ goto err_xfer;
- .pread = phys_pread,
- .pwrite = phys_pwrite,
+ /* Perma-pin (until release) the physical set of pages */
+ __i915_gem_object_pin_pages(obj);
- .release = phys_release,
-};
+ if (!IS_ERR_OR_NULL(pages))
+ i915_gem_shmem_ops.put_pages(obj, pages);
+
+ i915_gem_object_release_memory_region(obj);
+ return 0;
+
+err_xfer:
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
+ return err;
+}
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
- struct sg_table *pages;
int err;
if (align > obj->base.size)
return -EINVAL;
- if (obj->ops == &i915_gem_phys_ops)
- return 0;
-
if (!i915_gem_object_is_shmem(obj))
return -EINVAL;
+ if (!i915_gem_object_has_struct_page(obj))
+ return 0;
+
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
if (err)
return err;
mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ goto out;
+
if (obj->mm.madv != I915_MADV_WILLNEED) {
err = -EFAULT;
- goto err_unlock;
+ goto out;
}
if (i915_gem_object_has_tiling_quirk(obj)) {
err = -EFAULT;
- goto err_unlock;
+ goto out;
}
- if (obj->mm.mapping) {
+ if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj)) {
err = -EBUSY;
- goto err_unlock;
+ goto out;
}
- pages = __i915_gem_object_unset_pages(obj);
-
- obj->ops = &i915_gem_phys_ops;
- obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
-
- err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_xfer;
-
- /* Perma-pin (until release) the physical set of pages */
- __i915_gem_object_pin_pages(obj);
-
- if (!IS_ERR_OR_NULL(pages))
- i915_gem_shmem_ops.put_pages(obj, pages);
-
- i915_gem_object_release_memory_region(obj);
-
- mutex_unlock(&obj->mm.lock);
- return 0;
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ drm_dbg(obj->base.dev,
+ "Attempting to obtain a purgeable object\n");
+ err = -EFAULT;
+ goto out;
+ }
-err_xfer:
- obj->ops = &i915_gem_shmem_ops;
- obj->flags |= I915_BO_ALLOC_STRUCT_PAGE;
- if (!IS_ERR_OR_NULL(pages)) {
- unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+ err = i915_gem_object_shmem_to_phys(obj);
- __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
- }
-err_unlock:
+out:
mutex_unlock(&obj->mm.lock);
return err;
}