author     Nirmoy Das <nirmoy.das@intel.com>   2023-04-04 16:30:58 +0200
committer  Nirmoy Das <nirmoy.das@intel.com>   2023-04-06 18:20:35 +0200
commit     eaee1c08586395182e0004b3512a2f83570ea461 (patch)
tree       54a8e9c99dc24e5bbc21b4bad5168321b9d5d485 /drivers/gpu/drm/i915/gem/i915_gem_mman.c
parent     ddb78a51fac65e8db2316ded59e27ab621aea856 (diff)
drm/i915: Add a function to mmap framebuffer obj
Implement i915_gem_fb_mmap() to enable the fb_ops.fb_mmap() callback for
i915's framebuffer objects.

v2: add a comment on why i915_gem_object_get() is needed (Andi).
v3: mmap also ttm objects.

Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230404143100.10452-3-nirmoy.das@intel.com
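For context, here is a minimal sketch of how a fb_ops.fb_mmap() callback could forward into the new i915_gem_fb_mmap(). This wiring is not part of this patch, and the intel_fbdev_fb_obj() helper used to resolve the framebuffer's backing GEM object from the fb_info is a hypothetical placeholder.

#include <linux/fb.h>
#include <linux/mm.h>

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"

/*
 * Hypothetical fb_ops.fb_mmap() hook: look up the GEM object backing the
 * emulated fbdev framebuffer and let i915_gem_fb_mmap() set up the mapping.
 * intel_fbdev_fb_obj() is an assumed helper, for illustration only.
 */
static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = intel_fbdev_fb_obj(info);

	if (!obj)
		return -ENODEV;

	return i915_gem_fb_mmap(obj, vma);
}

Once such a hook is installed in the fbdev emulation's fb_ops, an mmap() on the fbdev node ends up in i915_gem_fb_mmap(), which either reuses the TTM fake offset or attaches a GTT/WC mmap offset, as the diff below shows.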
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_mman.c')
-rw-r--r--   drivers/gpu/drm/i915/gem/i915_gem_mman.c   137
1 file changed, 92 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 4f69bff63068..d036ef2dcdba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -927,53 +927,15 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
 	return file;
 }
 
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+i915_gem_object_mmap(struct drm_i915_gem_object *obj,
+		     struct i915_mmap_offset *mmo,
+		     struct vm_area_struct *vma)
 {
-	struct drm_vma_offset_node *node;
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_i915_gem_object *obj = NULL;
-	struct i915_mmap_offset *mmo = NULL;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_device *dev = &i915->drm;
 	struct file *anon;
 
-	if (drm_dev_is_unplugged(dev))
-		return -ENODEV;
-
-	rcu_read_lock();
-	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-						  vma->vm_pgoff,
-						  vma_pages(vma));
-	if (node && drm_vma_node_is_allowed(node, priv)) {
-		/*
-		 * Skip 0-refcnted objects as it is in the process of being
-		 * destroyed and will be invalid when the vma manager lock
-		 * is released.
-		 */
-		if (!node->driver_private) {
-			mmo = container_of(node, struct i915_mmap_offset, vma_node);
-			obj = i915_gem_object_get_rcu(mmo->obj);
-
-			GEM_BUG_ON(obj && obj->ops->mmap_ops);
-		} else {
-			obj = i915_gem_object_get_rcu
-				(container_of(node, struct drm_i915_gem_object,
-					      base.vma_node));
-
-			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
-		}
-	}
-	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
-	rcu_read_unlock();
-	if (!obj)
-		return node ? -EACCES : -EINVAL;
-
 	if (i915_gem_object_is_readonly(obj)) {
 		if (vma->vm_flags & VM_WRITE) {
 			i915_gem_object_put(obj);
@@ -1005,7 +967,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (obj->ops->mmap_ops) {
 		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
 		vma->vm_ops = obj->ops->mmap_ops;
-		vma->vm_private_data = node->driver_private;
+		vma->vm_private_data = obj->base.vma_node.driver_private;
 		return 0;
 	}
 
@@ -1043,6 +1005,91 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_i915_gem_object *obj = NULL;
+	struct i915_mmap_offset *mmo = NULL;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	rcu_read_lock();
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (node && drm_vma_node_is_allowed(node, priv)) {
+		/*
+		 * Skip 0-refcnted objects as it is in the process of being
+		 * destroyed and will be invalid when the vma manager lock
+		 * is released.
+		 */
+		if (!node->driver_private) {
+			mmo = container_of(node, struct i915_mmap_offset, vma_node);
+			obj = i915_gem_object_get_rcu(mmo->obj);
+
+			GEM_BUG_ON(obj && obj->ops->mmap_ops);
+		} else {
+			obj = i915_gem_object_get_rcu
+				(container_of(node, struct drm_i915_gem_object,
+					      base.vma_node));
+
+			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
+		}
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+	rcu_read_unlock();
+	if (!obj)
+		return node ? -EACCES : -EINVAL;
+
+	return i915_gem_object_mmap(obj, mmo, vma);
+}
+
+int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_device *dev = &i915->drm;
+	struct i915_mmap_offset *mmo = NULL;
+	enum i915_mmap_type mmap_type;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	/* handle ttm object */
+	if (obj->ops->mmap_ops) {
+		/*
+		 * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset
+		 * to calculate page offset so set that up.
+		 */
+		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
+	} else {
+		/* handle stolen and smem objects */
+		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
+		mmo = mmap_offset_attach(obj, mmap_type, NULL);
+		if (!mmo)
+			return -ENODEV;
+	}
+
+	/*
+	 * When we install vm_ops for mmap we are too late for
+	 * the vm_ops->open() which increases the ref_count of
+	 * this obj and then it gets decreased by the vm_ops->close().
+	 * To balance this increase the obj ref_count here.
+	 */
+	obj = i915_gem_object_get(obj);
+	return i915_gem_object_mmap(obj, mmo, vma);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_gem_mman.c"
 #endif
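As a rough usage illustration (not from this patch), a plain userspace mmap() of the fbdev node is what ultimately exercises this path once the fbdev emulation's fb_ops.fb_mmap() is wired to i915_gem_fb_mmap(). The device path and mapping length below are placeholders; a real client would query the framebuffer size first.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *map;
	/* /dev/fb0 and the 4096-byte length are illustrative only; use
	 * FBIOGET_FSCREENINFO to learn the real framebuffer size. */
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fb0");
		return EXIT_FAILURE;
	}

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	munmap(map, 4096);
	close(fd);
	return EXIT_SUCCESS;
}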