Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r--	drivers/gpu/drm/drm_gem_shmem_helper.c	532
1 file changed, 317 insertions(+), 215 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 4ea6507a77e5..dc94a27710e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,7 +10,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -19,11 +18,12 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
/**
* DOC: overview
@@ -49,27 +49,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.vm_ops = &drm_gem_shmem_vm_ops,
};
-static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+ size_t size, bool private, struct vfsmount *gemfs)
{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- size = PAGE_ALIGN(size);
-
- if (dev->driver->gem_create_object) {
- obj = dev->driver->gem_create_object(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
- shmem = to_drm_gem_shmem_obj(obj);
- } else {
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return ERR_PTR(-ENOMEM);
- obj = &shmem->base;
- }
-
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
@@ -77,19 +62,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
drm_gem_private_object_init(dev, obj, size);
shmem->map_wc = false; /* dma-buf mappings always use writecombine */
} else {
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
}
if (ret) {
drm_gem_private_object_fini(obj);
- goto err_free;
+ return ret;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto err_release;
- mutex_init(&shmem->pages_lock);
- mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list);
if (!private) {
@@ -104,14 +87,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
- return shmem;
-
+ return 0;
err_release:
drm_gem_object_release(obj);
-err_free:
- kfree(obj);
+ return ret;
+}
- return ERR_PTR(ret);
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @shmem: The allocated shmem GEM object.
+ * @size: Size of the backing memory, in bytes.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+ return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
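/*
 * Usage sketch (hypothetical driver code, not part of this patch): a driver
 * that embeds struct drm_gem_shmem_object in its own BO type can pair
 * drm_gem_shmem_init() with drm_gem_shmem_release() instead of letting
 * drm_gem_shmem_create()/drm_gem_shmem_free() allocate and free for it.
 * "my_bo", "my_bo_create" and "my_bo_free" are made-up names.
 */
#include <linux/slab.h>
#include <drm/drm_gem_shmem_helper.h>

struct my_bo {
	struct drm_gem_shmem_object shmem;
	/* driver-private state ... */
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
	struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	int ret;

	if (!bo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_shmem_init(dev, &bo->shmem, size);
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}

static void my_bo_free(struct drm_gem_object *obj)
{
	struct my_bo *bo = container_of(to_drm_gem_shmem_obj(obj),
					struct my_bo, shmem);

	/* Releases GEM state only; freeing the embedding object is ours. */
	drm_gem_shmem_release(&bo->shmem);
	kfree(bo);
}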
+
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+ struct vfsmount *gemfs)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object) {
+ obj = dev->driver->gem_create_object(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
+
+ ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return shmem;
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
@@ -126,26 +150,49 @@ err_free:
*/
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
- return __drm_gem_shmem_create(dev, size, false);
+ return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
/**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
+ * given mountpoint
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ * @gemfs: tmpfs mount where the GEM object will be created
*
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function creates a shmem GEM object in a given tmpfs mountpoint.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+ size_t size,
+ struct vfsmount *gemfs)
{
- struct drm_gem_object *obj = &shmem->base;
+ return __drm_gem_shmem_create(dev, size, false, gemfs);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
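/*
 * Usage sketch (hypothetical, not part of this patch): a driver that keeps a
 * private tmpfs mount (for example one configured for transparent huge pages)
 * can allocate its buffers there. "pdev->gemfs" is an assumed driver-owned
 * struct vfsmount * set up elsewhere; passing NULL is expected to fall back
 * to the default shmem mount, as with drm_gem_object_init_with_mnt().
 */
static struct drm_gem_shmem_object *
my_bo_create_thp(struct my_device *pdev, size_t size)
{
	return drm_gem_shmem_create_with_mnt(&pdev->base, size, pdev->gemfs);
}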
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+/**
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
+ *
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
+ */
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
+ dma_resv_lock(shmem->base.resv, NULL);
+
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
+
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
DMA_BIDIRECTIONAL, 0);
@@ -153,14 +200,28 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
- }
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
+
+ dma_resv_unlock(shmem->base.resv);
+ }
drm_gem_object_release(obj);
- mutex_destroy(&shmem->pages_lock);
- mutex_destroy(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+ drm_gem_shmem_release(shmem);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
@@ -170,14 +231,15 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
- if (shmem->pages_use_count++ > 0)
+ dma_resv_assert_held(shmem->base.resv);
+
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -193,70 +255,64 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
- * This function makes sure that backing pages exists for the shmem GEM object
- * and increases the use count.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function decreases the use count and puts the backing pages when use drops to zero.
*/
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- int ret;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ dma_resv_assert_held(shmem->base.resv);
- ret = mutex_lock_interruptible(&shmem->pages_lock);
- if (ret)
- return ret;
- ret = drm_gem_shmem_get_pages_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+#endif
- return ret;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
-static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
- struct drm_gem_object *obj = &shmem->base;
+ int ret;
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
+ dma_resv_assert_held(shmem->base.resv);
- if (--shmem->pages_use_count > 0)
- return;
+ drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
-#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
-#endif
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
+
+ return ret;
}
+EXPORT_SYMBOL(drm_gem_shmem_pin_locked);
-/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
- mutex_lock(&shmem->pages_lock);
- drm_gem_shmem_put_pages_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_assert_held(shmem->base.resv);
+
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
@@ -271,12 +327,22 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- drm_WARN_ON(obj->dev, obj->import_attach);
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
+ ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_pin_locked(shmem);
+ dma_resv_unlock(shmem->base.resv);
- return drm_gem_shmem_get_pages(shmem);
+ return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -289,46 +355,66 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
- drm_gem_shmem_put_pages(shmem);
+ dma_resv_lock(shmem->base.resv, NULL);
+ drm_gem_shmem_unpin_locked(shmem);
+ dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
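/*
 * Usage sketch (hypothetical, not part of this patch): pinning around a DMA
 * setup path. drm_gem_shmem_pin() takes the reservation lock itself, so it
 * must be called without it held; the *_pin_locked()/*_unpin_locked()
 * variants are for callers already holding shmem->base.resv.
 */
static int my_bo_map_for_dma(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_pin(shmem);	/* pages stay resident until unpin */
	if (ret)
		return ret;

	/* ... hand the backing pages to the hardware ... */

	return 0;
}

static void my_bo_unmap_for_dma(struct drm_gem_shmem_object *shmem)
{
	/* ... quiesce the hardware first ... */
	drm_gem_shmem_unpin(shmem);
}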
-static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+/*
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ * store.
+ *
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (obj->import_attach) {
+ dma_resv_assert_held(obj->resv);
+
+ if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
- return -EIO;
- }
- }
} else {
pgprot_t prot = PAGE_KERNEL;
- if (shmem->vmap_use_count++ > 0) {
+ dma_resv_assert_held(shmem->base.resv);
+
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -339,85 +425,46 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
return 0;
err_put_pages:
- if (!obj->import_attach)
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ if (!drm_gem_is_imported(obj))
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- * store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&shmem->vmap_lock);
- if (ret)
- return ret;
- ret = drm_gem_shmem_vmap_locked(shmem, map);
- mutex_unlock(&shmem->vmap_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
-
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ dma_resv_assert_held(obj->resv);
+
+ if (drm_gem_is_imported(obj)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
+ dma_resv_assert_held(shmem->base.resv);
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-
-/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
- *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
- *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
- */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
-{
- mutex_lock(&shmem->vmap_lock);
- drm_gem_shmem_vunmap_locked(shmem, map);
- mutex_unlock(&shmem->vmap_lock);
-}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
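/*
 * Usage sketch (hypothetical, not part of this patch): with only the _locked
 * vmap helpers exported, callers bracket them with the object's reservation
 * lock. Assumes <linux/iosys-map.h> for iosys_map_memset().
 */
static int my_bo_cpu_fill(struct drm_gem_shmem_object *shmem, u8 value)
{
	struct iosys_map map;
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);

	ret = drm_gem_shmem_vmap_locked(shmem, &map);
	if (!ret) {
		iosys_map_memset(&map, 0, value, shmem->base.size);
		drm_gem_shmem_vunmap_locked(shmem, &map);
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}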
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -445,26 +492,26 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
- mutex_lock(&shmem->pages_lock);
+ dma_resv_assert_held(shmem->base.resv);
if (shmem->madv >= 0)
shmem->madv = madv;
madv = shmem->madv;
- mutex_unlock(&shmem->pages_lock);
-
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
+ dma_resv_assert_held(shmem->base.resv);
+
drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
@@ -488,18 +535,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
-{
- if (!mutex_trylock(&shmem->pages_lock))
- return false;
- drm_gem_shmem_purge_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
-
- return true;
-}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
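/*
 * Usage sketch (hypothetical, not part of this patch): with drm_gem_shmem_purge()
 * gone, a driver shrinker purges under the reservation lock itself; a trylock
 * keeps the shrinker from stalling on contended objects.
 */
static bool my_bo_try_purge(struct drm_gem_shmem_object *shmem)
{
	bool purged = false;

	if (!dma_resv_trylock(shmem->base.resv))
		return false;

	if (drm_gem_shmem_is_purgeable(shmem)) {
		drm_gem_shmem_purge_locked(shmem);
		purged = true;
	}

	dma_resv_unlock(shmem->base.resv);

	return purged;
}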
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -521,18 +557,11 @@ EXPORT_SYMBOL(drm_gem_shmem_purge);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
- if (!args->pitch || !args->size) {
- args->pitch = min_pitch;
- args->size = PAGE_ALIGN(args->pitch * args->height);
- } else {
- /* ensure sane minimum values */
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
- if (args->size < args->pitch * args->height)
- args->size = PAGE_ALIGN(args->pitch * args->height);
- }
+ ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+ if (ret)
+ return ret;
return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
@@ -551,7 +580,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- mutex_lock(&shmem->pages_lock);
+ dma_resv_lock(shmem->base.resv, NULL);
if (page_offset >= num_pages ||
drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
@@ -563,7 +592,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
return ret;
}
@@ -573,19 +602,19 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- mutex_lock(&shmem->pages_lock);
+ dma_resv_lock(shmem->base.resv, NULL);
/*
* We should have already pinned the pages when the buffer was first
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
drm_gem_vm_open(vma);
}
@@ -595,7 +624,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_put_pages(shmem);
+ dma_resv_lock(shmem->base.resv, NULL);
+ drm_gem_shmem_put_pages_locked(shmem);
+ dma_resv_unlock(shmem->base.resv);
+
drm_gem_vm_close(vma);
}
@@ -622,8 +654,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
struct drm_gem_object *obj = &shmem->base;
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
+ /* Reset both vm_ops and vm_private_data, so we don't end up with
+ * vm_ops pointing to our implementation if the dma-buf backend
+ * doesn't set those fields.
+ */
vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+
ret = dma_buf_mmap(obj->dma_buf, vma, 0);
/* Drop the reference drm_gem_mmap_obj() acquired.*/
@@ -633,7 +671,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
+ dma_resv_lock(shmem->base.resv, NULL);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ dma_resv_unlock(shmem->base.resv);
+
if (ret)
return ret;
@@ -655,14 +699,15 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
- if (shmem->base.import_attach)
+ if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -682,7 +727,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -697,7 +742,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
@@ -746,11 +791,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
int ret;
struct sg_table *sgt;
- ret = mutex_lock_interruptible(&shmem->pages_lock);
+ ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ERR_PTR(ret);
sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
return sgt;
}
@@ -779,7 +824,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
size_t size = PAGE_ALIGN(attach->dmabuf->size);
struct drm_gem_shmem_object *shmem;
- shmem = __drm_gem_shmem_create(dev, size, true);
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
@@ -791,6 +836,63 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also want to import dma-bufs without
+ * mapping their sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ size_t size;
+ int ret;
+
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ obj = dma_buf->priv;
+ drm_gem_object_get(obj);
+ return obj;
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto fail_detach;
+ }
+
+ drm_dbg_prime(dev, "size = %zu\n", size);
+
+ shmem->base.import_attach = attach;
+ shmem->base.resv = dma_buf->resv;
+
+ return &shmem->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
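/*
 * Usage sketch (hypothetical driver code, not part of this patch): wiring the
 * new import helper into a driver that never needs the imported sg_table
 * mapped through its own device. Fields other than the two shown are elided.
 */
static const struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM,
	.gem_prime_import	= drm_gem_shmem_prime_import_no_map,
	.dumb_create		= drm_gem_shmem_dumb_create,
	/* ... fops, name, ioctls, etc. ... */
};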
+
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");