Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r--	drivers/gpu/drm/drm_gem_shmem_helper.c | 643
1 file changed, 386 insertions(+), 257 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 35138f8a375c..dc94a27710e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,7 +10,6 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <linux/module.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -19,11 +18,12 @@
 #include <drm/drm.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_print.h>
 
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
 
 /**
  * DOC: overview
@@ -49,27 +49,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 	.vm_ops = &drm_gem_shmem_vm_ops,
 };
 
-static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+				size_t size, bool private, struct vfsmount *gemfs)
 {
-	struct drm_gem_shmem_object *shmem;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
-	size = PAGE_ALIGN(size);
-
-	if (dev->driver->gem_create_object) {
-		obj = dev->driver->gem_create_object(dev, size);
-		if (IS_ERR(obj))
-			return ERR_CAST(obj);
-		shmem = to_drm_gem_shmem_obj(obj);
-	} else {
-		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
-		if (!shmem)
-			return ERR_PTR(-ENOMEM);
-		obj = &shmem->base;
-	}
-
 	if (!obj->funcs)
 		obj->funcs = &drm_gem_shmem_funcs;
 
@@ -77,17 +62,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 		drm_gem_private_object_init(dev, obj, size);
 		shmem->map_wc = false; /* dma-buf mappings use always writecombine */
 	} else {
-		ret = drm_gem_object_init(dev, obj, size);
+		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
+	}
+	if (ret) {
+		drm_gem_private_object_fini(obj);
+		return ret;
 	}
-	if (ret)
-		goto err_free;
 
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
 		goto err_release;
 
-	mutex_init(&shmem->pages_lock);
-	mutex_init(&shmem->vmap_lock);
 	INIT_LIST_HEAD(&shmem->madv_list);
 
 	if (!private) {
@@ -102,14 +87,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 				__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	}
 
-	return shmem;
-
+	return 0;
 err_release:
 	drm_gem_object_release(obj);
-err_free:
-	kfree(obj);
+	return ret;
+}
 
-	return ERR_PTR(ret);
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @obj: The allocated shmem GEM object.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+	return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+		       struct vfsmount *gemfs)
+{
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	size = PAGE_ALIGN(size);
+
+	if (dev->driver->gem_create_object) {
+		obj = dev->driver->gem_create_object(dev, size);
+		if (IS_ERR(obj))
+			return ERR_CAST(obj);
+		shmem = to_drm_gem_shmem_obj(obj);
+	} else {
+		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+		if (!shmem)
+			return ERR_PTR(-ENOMEM);
+		obj = &shmem->base;
+	}
+
+	ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
+	if (ret) {
+		kfree(obj);
+		return ERR_PTR(ret);
+	}
+
+	return shmem;
 }
 
 /**
  * drm_gem_shmem_create - Allocate an object with the given size
@@ -124,26 +150,49 @@ err_free:
  */
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
 {
-	return __drm_gem_shmem_create(dev, size, false);
+	return __drm_gem_shmem_create(dev, size, false, NULL);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
 
 /**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
+ * given mountpoint
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ * @gemfs: tmpfs mount where the GEM object will be created
  *
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function creates a shmem GEM object in a given tmpfs mountpoint.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
  */
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+							   size_t size,
+							   struct vfsmount *gemfs)
 {
-	struct drm_gem_object *obj = &shmem->base;
+	return __drm_gem_shmem_create(dev, size, false, gemfs);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
 
-	WARN_ON(shmem->vmap_use_count);
+/**
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
+ *
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
+ */
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
 
-	if (obj->import_attach) {
+	if (drm_gem_is_imported(obj)) {
 		drm_prime_gem_destroy(obj, shmem->sgt);
 	} else {
+		dma_resv_lock(shmem->base.resv, NULL);
+
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
+
 		if (shmem->sgt) {
 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
 					  DMA_BIDIRECTIONAL, 0);
@@ -151,14 +200,28 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 			kfree(shmem->sgt);
 		}
 		if (shmem->pages)
-			drm_gem_shmem_put_pages(shmem);
-	}
+			drm_gem_shmem_put_pages_locked(shmem);
 
-	WARN_ON(shmem->pages_use_count);
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
+
+		dma_resv_unlock(shmem->base.resv);
+	}
 
 	drm_gem_object_release(obj);
-	mutex_destroy(&shmem->pages_lock);
-	mutex_destroy(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+	drm_gem_shmem_release(shmem);
 	kfree(shmem);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
@@ -168,13 +231,15 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	struct page **pages;
 
-	if (shmem->pages_use_count++ > 0)
+	dma_resv_assert_held(shmem->base.resv);
+
+	if (refcount_inc_not_zero(&shmem->pages_use_count))
 		return 0;
 
 	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
-		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
-		shmem->pages_use_count = 0;
+		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+			    PTR_ERR(pages));
 		return PTR_ERR(pages);
 	}
 
@@ -190,69 +255,64 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 
 	shmem->pages = pages;
 
+	refcount_set(&shmem->pages_use_count, 1);
+
 	return 0;
 }
 
 /*
- * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
  * @shmem: shmem GEM object
  *
- * This function makes sure that backing pages exists for the shmem GEM object
- * and increases the use count.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function decreases the use count and puts the backing pages when use drops to zero.
  */
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 {
-	int ret;
+	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->base.import_attach);
+	dma_resv_assert_held(shmem->base.resv);
 
-	ret = mutex_lock_interruptible(&shmem->pages_lock);
-	if (ret)
-		return ret;
-	ret = drm_gem_shmem_get_pages_locked(shmem);
-	mutex_unlock(&shmem->pages_lock);
+	if (refcount_dec_and_test(&shmem->pages_use_count)) {
+#ifdef CONFIG_X86
+		if (shmem->map_wc)
+			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+#endif
 
-	return ret;
+		drm_gem_put_pages(obj, shmem->pages,
+				  shmem->pages_mark_dirty_on_put,
+				  shmem->pages_mark_accessed_on_put);
+		shmem->pages = NULL;
+	}
 }
-EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
 
-static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
 {
-	struct drm_gem_object *obj = &shmem->base;
+	int ret;
 
-	if (WARN_ON_ONCE(!shmem->pages_use_count))
-		return;
+	dma_resv_assert_held(shmem->base.resv);
 
-	if (--shmem->pages_use_count > 0)
-		return;
+	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
 
-#ifdef CONFIG_X86
-	if (shmem->map_wc)
-		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
-#endif
+	if (refcount_inc_not_zero(&shmem->pages_pin_count))
+		return 0;
+
+	ret = drm_gem_shmem_get_pages_locked(shmem);
+	if (!ret)
+		refcount_set(&shmem->pages_pin_count, 1);
 
-	drm_gem_put_pages(obj, shmem->pages,
-			  shmem->pages_mark_dirty_on_put,
-			  shmem->pages_mark_accessed_on_put);
-	shmem->pages = NULL;
+	return ret;
 }
+EXPORT_SYMBOL(drm_gem_shmem_pin_locked);
 
-/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
 {
-	mutex_lock(&shmem->pages_lock);
-	drm_gem_shmem_put_pages_locked(shmem);
-	mutex_unlock(&shmem->pages_lock);
+	dma_resv_assert_held(shmem->base.resv);
+
+	if (refcount_dec_and_test(&shmem->pages_pin_count))
+		drm_gem_shmem_put_pages_locked(shmem);
 }
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
 
 /**
  * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
@@ -266,11 +326,23 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  */
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+	int ret;
 
-	return drm_gem_shmem_get_pages(shmem);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+	if (refcount_inc_not_zero(&shmem->pages_pin_count))
+		return 0;
+
+	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
+	if (ret)
+		return ret;
+	ret = drm_gem_shmem_pin_locked(shmem);
+	dma_resv_unlock(shmem->base.resv);
+
+	return ret;
 }
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
 
 /**
  * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -281,139 +353,120 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  */
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+	if (refcount_dec_not_one(&shmem->pages_pin_count))
+		return;
 
-	drm_gem_shmem_put_pages(shmem);
+	dma_resv_lock(shmem->base.resv, NULL);
+	drm_gem_shmem_unpin_locked(shmem);
+	dma_resv_unlock(shmem->base.resv);
 }
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
 
-static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
-				     struct iosys_map *map)
+/*
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ *       store.
+ *
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+			      struct iosys_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
-	if (shmem->vmap_use_count++ > 0) {
-		iosys_map_set_vaddr(map, shmem->vaddr);
-		return 0;
-	}
+	dma_resv_assert_held(obj->resv);
 
-	if (obj->import_attach) {
+	if (drm_gem_is_imported(obj)) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
-		if (!ret) {
-			if (WARN_ON(map->is_iomem)) {
-				dma_buf_vunmap(obj->import_attach->dmabuf, map);
-				ret = -EIO;
-				goto err_put_pages;
-			}
-			shmem->vaddr = map->vaddr;
-		}
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
-		ret = drm_gem_shmem_get_pages(shmem);
+		dma_resv_assert_held(shmem->base.resv);
+
+		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
+			iosys_map_set_vaddr(map, shmem->vaddr);
+			return 0;
+		}
+
+		ret = drm_gem_shmem_pin_locked(shmem);
 		if (ret)
-			goto err_zero_use;
+			return ret;
 
 		if (shmem->map_wc)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
-		if (!shmem->vaddr)
+		if (!shmem->vaddr) {
 			ret = -ENOMEM;
-		else
+		} else {
 			iosys_map_set_vaddr(map, shmem->vaddr);
+			refcount_set(&shmem->vmap_use_count, 1);
+		}
 	}
 
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
 		goto err_put_pages;
 	}
 
 	return 0;
 
 err_put_pages:
-	if (!obj->import_attach)
-		drm_gem_shmem_put_pages(shmem);
-err_zero_use:
-	shmem->vmap_use_count = 0;
+	if (!drm_gem_is_imported(obj))
+		drm_gem_shmem_unpin_locked(shmem);
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
 
 /*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- *       store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
  *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
  */
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-		       struct iosys_map *map)
-{
-	int ret;
-
-	ret = mutex_lock_interruptible(&shmem->vmap_lock);
-	if (ret)
-		return ret;
-	ret = drm_gem_shmem_vmap_locked(shmem, map);
-	mutex_unlock(&shmem->vmap_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
-
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
-					struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+				 struct iosys_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->vmap_use_count))
-		return;
-
-	if (--shmem->vmap_use_count > 0)
-		return;
+	dma_resv_assert_held(obj->resv);
 
-	if (obj->import_attach) {
+	if (drm_gem_is_imported(obj)) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
-		vunmap(shmem->vaddr);
-		drm_gem_shmem_put_pages(shmem);
-	}
+		dma_resv_assert_held(shmem->base.resv);
 
-	shmem->vaddr = NULL;
-}
+		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+			vunmap(shmem->vaddr);
+			shmem->vaddr = NULL;
 
-/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
- *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
- *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
- */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-			  struct iosys_map *map)
-{
-	mutex_lock(&shmem->vmap_lock);
-	drm_gem_shmem_vunmap_locked(shmem, map);
-	mutex_unlock(&shmem->vmap_lock);
+			drm_gem_shmem_unpin_locked(shmem);
+		}
+	}
 }
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
 
-static struct drm_gem_shmem_object *
+static int
 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
 				 struct drm_device *dev, size_t size,
 				 uint32_t *handle)
@@ -423,7 +476,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
 
 	shmem = drm_gem_shmem_create(dev, size);
 	if (IS_ERR(shmem))
-		return shmem;
+		return PTR_ERR(shmem);
 
 	/*
	 * Allocate an id of idr table where the obj is registered
@@ -432,36 +485,34 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
 	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
 	/* drop reference from allocate - handle holds it now. */
 	drm_gem_object_put(&shmem->base);
-	if (ret)
-		return ERR_PTR(ret);
 
-	return shmem;
+	return ret;
 }
 
 /* Update madvise status, returns true if not purged, else
  * false or -errno.
  */
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
 {
-	mutex_lock(&shmem->pages_lock);
+	dma_resv_assert_held(shmem->base.resv);
 
 	if (shmem->madv >= 0)
 		shmem->madv = madv;
 
 	madv = shmem->madv;
 
-	mutex_unlock(&shmem->pages_lock);
-
 	return (madv >= 0);
 }
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
 
 void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	struct drm_device *dev = obj->dev;
 
-	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+	dma_resv_assert_held(shmem->base.resv);
+
+	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
 	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
 	sg_free_table(shmem->sgt);
@@ -484,18 +535,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);
 }
-EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
-{
-	if (!mutex_trylock(&shmem->pages_lock))
-		return false;
-
-	drm_gem_shmem_purge_locked(shmem);
-	mutex_unlock(&shmem->pages_lock);
-
-	return true;
-}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
 
 /**
  * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -517,23 +557,13 @@ EXPORT_SYMBOL(drm_gem_shmem_purge);
 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			      struct drm_mode_create_dumb *args)
 {
-	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
-	struct drm_gem_shmem_object *shmem;
-
-	if (!args->pitch || !args->size) {
-		args->pitch = min_pitch;
-		args->size = PAGE_ALIGN(args->pitch * args->height);
-	} else {
-		/* ensure sane minimum values */
-		if (args->pitch < min_pitch)
-			args->pitch = min_pitch;
-		if (args->size < args->pitch * args->height)
-			args->size = PAGE_ALIGN(args->pitch * args->height);
-	}
+	int ret;
 
-	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
+	ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+	if (ret)
+		return ret;
 
-	return PTR_ERR_OR_ZERO(shmem);
+	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
@@ -550,10 +580,10 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
-	mutex_lock(&shmem->pages_lock);
+	dma_resv_lock(shmem->base.resv, NULL);
 
 	if (page_offset >= num_pages ||
-	    WARN_ON_ONCE(!shmem->pages) ||
+	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
 	    shmem->madv < 0) {
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -562,7 +592,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
 	}
 
-	mutex_unlock(&shmem->pages_lock);
+	dma_resv_unlock(shmem->base.resv);
 
 	return ret;
 }
@@ -571,12 +601,20 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-	int ret;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
 
-	ret = drm_gem_shmem_get_pages(shmem);
-	WARN_ON_ONCE(ret != 0);
+	dma_resv_lock(shmem->base.resv, NULL);
+
+	/*
+	 * We should have already pinned the pages when the buffer was first
+	 * mmap'd, vm_open() just grabs an additional reference for the new
+	 * mm the vma is getting copied into (ie. on fork()).
+	 */
+	drm_WARN_ON_ONCE(obj->dev,
+			 !refcount_inc_not_zero(&shmem->pages_use_count));
+
+	dma_resv_unlock(shmem->base.resv);
 
 	drm_gem_vm_open(vma);
 }
@@ -586,7 +624,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	drm_gem_shmem_put_pages(shmem);
+	dma_resv_lock(shmem->base.resv, NULL);
+	drm_gem_shmem_put_pages_locked(shmem);
+	dma_resv_unlock(shmem->base.resv);
+
 	drm_gem_vm_close(vma);
 }
 
@@ -613,21 +654,34 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	struct drm_gem_object *obj = &shmem->base;
 	int ret;
 
-	if (obj->import_attach) {
-		/* Drop the reference drm_gem_mmap_obj() acquired.*/
-		drm_gem_object_put(obj);
+	if (drm_gem_is_imported(obj)) {
+		/* Reset both vm_ops and vm_private_data, so we don't end up with
+		 * vm_ops pointing to our implementation if the dma-buf backend
+		 * doesn't set those fields.
+		 */
 		vma->vm_private_data = NULL;
+		vma->vm_ops = NULL;
 
-		return dma_buf_mmap(obj->dma_buf, vma, 0);
-	}
+		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+		if (!ret)
+			drm_gem_object_put(obj);
 
-	ret = drm_gem_shmem_get_pages(shmem);
-	if (ret) {
-		drm_gem_vm_close(vma);
 		return ret;
 	}
 
-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
+
+	dma_resv_lock(shmem->base.resv, NULL);
+	ret = drm_gem_shmem_get_pages_locked(shmem);
+	dma_resv_unlock(shmem->base.resv);
+
+	if (ret)
+		return ret;
+
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	if (shmem->map_wc)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -645,11 +699,15 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent)
 {
-	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+	if (drm_gem_is_imported(&shmem->base))
+		return;
+
+	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
 
 /**
  * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -669,29 +727,13 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
 
 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
 
-/**
- * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
- *				 scatter/gather table for a shmem GEM object.
- * @shmem: shmem GEM object
- *
- * This function returns a scatter/gather table suitable for driver usage. If
- * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
- * table created.
- *
- * This is the main function for drivers to get at backing storage, and it hides
- * and difference between dma-buf imported and natively allocated objects.
- * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
- *
- * Returns:
- * A pointer to the scatter/gather table of pinned pages or errno on failure.
- */
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	int ret;
@@ -700,9 +742,9 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
 
 	if (shmem->sgt)
 		return shmem->sgt;
 
-	WARN_ON(obj->import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
 
-	ret = drm_gem_shmem_get_pages(shmem);
+	ret = drm_gem_shmem_get_pages_locked(shmem);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -724,9 +766,39 @@ err_free_sgt:
 	sg_free_table(sgt);
 	kfree(sgt);
 err_put_pages:
-	drm_gem_shmem_put_pages(shmem);
+	drm_gem_shmem_put_pages_locked(shmem);
 	return ERR_PTR(ret);
 }
+
+/**
+ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+ *				 scatter/gather table for a shmem GEM object.
+ * @shmem: shmem GEM object
+ *
+ * This function returns a scatter/gather table suitable for driver usage. If
+ * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
+ * table created.
+ *
+ * This is the main function for drivers to get at backing storage, and it hides
+ * and difference between dma-buf imported and natively allocated objects.
+ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or errno on failure.
+ */
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+{
+	int ret;
+	struct sg_table *sgt;
+
+	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
+	if (ret)
+		return ERR_PTR(ret);
+	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
+	dma_resv_unlock(shmem->base.resv);
+
+	return sgt;
+}
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
 
 /**
@@ -752,18 +824,75 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
 	size_t size = PAGE_ALIGN(attach->dmabuf->size);
 	struct drm_gem_shmem_object *shmem;
 
-	shmem = __drm_gem_shmem_create(dev, size, true);
+	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
 	if (IS_ERR(shmem))
 		return ERR_CAST(shmem);
 
 	shmem->sgt = sgt;
 
-	DRM_DEBUG_PRIME("size = %zu\n", size);
+	drm_dbg_prime(dev, "size = %zu\n", size);
 
 	return &shmem->base;
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
 
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also wants to import dmabuf without
+ * mapping its sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+							 struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *obj;
+	size_t size;
+	int ret;
+
+	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+		/*
+		 * Importing dmabuf exported from our own gem increases
+		 * refcount on gem itself instead of f_count of dmabuf.
+		 */
+		obj = dma_buf->priv;
+		drm_gem_object_get(obj);
+		return obj;
+	}
+
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(dma_buf);
+
+	size = PAGE_ALIGN(attach->dmabuf->size);
+
+	shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+	if (IS_ERR(shmem)) {
+		ret = PTR_ERR(shmem);
+		goto fail_detach;
+	}
+
+	drm_dbg_prime(dev, "size = %zu\n", size);
+
+	shmem->base.import_attach = attach;
+	shmem->base.resv = dma_buf->resv;
+
+	return &shmem->base;
+
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
+
 MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
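
Note on caller impact: the vmap/vunmap, madvise, and purge entry points become *_locked variants that expect the GEM object's dma_resv lock to be held, replacing the removed per-object pages_lock/vmap_lock mutexes. Below is a minimal driver-side sketch of that pattern after the rework, assuming only the helpers visible in the diff; example_fill_bo() and its arguments are hypothetical.

/*
 * Hedged sketch, not part of the patch: CPU-fill a natively allocated shmem
 * BO. drm_gem_shmem_vmap_locked()/vunmap_locked() now assert that the
 * object's dma_resv lock is held, so the caller takes it explicitly.
 */
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>

#include <drm/drm_gem_shmem_helper.h>

static int example_fill_bo(struct drm_gem_shmem_object *shmem,
			   const void *src, size_t len)
{
	struct iosys_map map;
	int ret;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;

	ret = drm_gem_shmem_vmap_locked(shmem, &map);
	if (!ret) {
		/* iosys_map_memcpy_to() handles both vaddr and iomem maps. */
		iosys_map_memcpy_to(&map, 0, src, len);
		drm_gem_shmem_vunmap_locked(shmem, &map);
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}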

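The drm_gem_shmem_init()/drm_gem_shmem_release() split introduced above separates GEM-state setup and teardown from the allocation itself, so the object can live in memory the helpers do not own (the patch names the Rust GEM bindings as the intended user). A hedged C illustration of the same split; all example_* names are hypothetical, and a real driver would also install its own drm_gem_object_funcs whose .free callback performs this teardown.

/*
 * Hedged illustration, not part of the patch: embedding the shmem object
 * in a driver-owned allocation via the new init/release pair.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <drm/drm_gem_shmem_helper.h>

struct example_bo {
	struct drm_gem_shmem_object shmem;	/* embedded, not pointed to */
	u32 flags;				/* driver-private state */
};

static struct example_bo *example_bo_create(struct drm_device *dev, size_t size)
{
	struct example_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* Initialize the caller-allocated object in place. */
	ret = drm_gem_shmem_init(dev, &bo->shmem, size);
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}

static void example_bo_destroy(struct example_bo *bo)
{
	/* Unlike drm_gem_shmem_free(), release() does not kfree() the
	 * object; freeing the containing allocation stays with its owner. */
	drm_gem_shmem_release(&bo->shmem);
	kfree(bo);
}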