Diffstat (limited to 'drivers/gpu/drm/drm_prime.c')
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 1330
1 file changed, 903 insertions, 427 deletions
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 85e450e3241c..21809a82187b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -28,49 +28,77 @@ #include <linux/export.h> #include <linux/dma-buf.h> -#include <drm/drmP.h> +#include <linux/rbtree.h> +#include <linux/module.h> -/* - * DMA-BUF/GEM Object references and lifetime overview: - * - * On the export the dma_buf holds a reference to the exporting GEM - * object. It takes this reference in handle_to_fd_ioctl, when it - * first calls .prime_export and stores the exporting GEM object in - * the dma_buf priv. This reference is released when the dma_buf - * object goes away in the driver .release function. - * - * On the import the importing GEM object holds a reference to the - * dma_buf (which in turn holds a ref to the exporting GEM object). - * It takes that reference in the fd_to_handle ioctl. - * It calls dma_buf_get, creates an attachment to it and stores the - * attachment in the GEM object. When this attachment is destroyed - * when the imported object is destroyed, we remove the attachment - * and drop the reference to the dma_buf. - * - * Thus the chain of references always flows in one direction - * (avoiding loops): importing_gem -> dmabuf -> exporting_gem - * - * Self-importing: if userspace is using PRIME as a replacement for flink - * then it will get a fd->handle request for a GEM object that it created. - * Drivers should detect this situation and return back the gem object - * from the dma-buf private. Prime will do this automatically for drivers that - * use the drm_gem_prime_{import,export} helpers. +#include <drm/drm.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem.h> +#include <drm/drm_prime.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" + +MODULE_IMPORT_NS("DMA_BUF"); + +/** + * DOC: overview and lifetime rules + * + * Similar to GEM global names, PRIME file descriptors are also used to share + * buffer objects across processes. They offer additional security: as file + * descriptors must be explicitly sent over UNIX domain sockets to be shared + * between applications, they can't be guessed like the globally unique GEM + * names. + * + * Drivers that support the PRIME API implement the drm_gem_object_funcs.export + * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for + * drivers are all individually exported for drivers which need to overwrite + * or reimplement some of them. + * + * Reference Counting for GEM Drivers + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * On the export the &dma_buf holds a reference to the exported buffer object, + * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD + * IOCTL, when it first calls &drm_gem_object_funcs.export + * and stores the exporting GEM object in the &dma_buf.priv field. This + * reference needs to be released when the final reference to the &dma_buf + * itself is dropped and its &dma_buf_ops.release function is called. For + * GEM-based drivers, the &dma_buf should be exported using + * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release(). + * + * Thus the chain of references always flows in one direction, avoiding loops: + * importing GEM object -> dma-buf -> exported GEM bo. A further complication + * are the lookup caches for import and export. These are required to guarantee + * that any given object will always have only one unique userspace handle. 
This + * is required to allow userspace to detect duplicated imports, since some GEM + * drivers do fail command submissions if a given buffer object is listed more + * than once. These import and export caches in &drm_prime_file_private only + * retain a weak reference, which is cleaned up when the corresponding object is + * released. + * + * Self-importing: If userspace is using PRIME as a replacement for flink then + * it will get a fd->handle request for a GEM object that it created. Drivers + * should detect this situation and return back the underlying object from the + * dma-buf private. For GEM based drivers this is handled in + * drm_gem_prime_import() already. */ struct drm_prime_member { - struct list_head entry; struct dma_buf *dma_buf; uint32_t handle; -}; -struct drm_prime_attachment { - struct sg_table *sgt; - enum dma_data_direction dir; + struct rb_node dmabuf_rb; + struct rb_node handle_rb; }; -static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) +int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf, uint32_t handle) { struct drm_prime_member *member; + struct rb_node **p, *rb; member = kmalloc(sizeof(*member), GFP_KERNEL); if (!member) @@ -79,564 +107,1012 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, get_dma_buf(dma_buf); member->dma_buf = dma_buf; member->handle = handle; - list_add(&member->entry, &prime_fpriv->head); - return 0; -} - -static int drm_gem_map_attach(struct dma_buf *dma_buf, - struct device *target_dev, - struct dma_buf_attachment *attach) -{ - struct drm_prime_attachment *prime_attach; - struct drm_gem_object *obj = dma_buf->priv; - struct drm_device *dev = obj->dev; - - prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL); - if (!prime_attach) - return -ENOMEM; - - prime_attach->dir = DMA_NONE; - attach->priv = prime_attach; - if (!dev->driver->gem_prime_pin) - return 0; + rb = NULL; + p = &prime_fpriv->dmabufs.rb_node; + while (*p) { + struct drm_prime_member *pos; + + rb = *p; + pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); + if (dma_buf > pos->dma_buf) + p = &rb->rb_right; + else + p = &rb->rb_left; + } + rb_link_node(&member->dmabuf_rb, rb, p); + rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs); + + rb = NULL; + p = &prime_fpriv->handles.rb_node; + while (*p) { + struct drm_prime_member *pos; + + rb = *p; + pos = rb_entry(rb, struct drm_prime_member, handle_rb); + if (handle > pos->handle) + p = &rb->rb_right; + else + p = &rb->rb_left; + } + rb_link_node(&member->handle_rb, rb, p); + rb_insert_color(&member->handle_rb, &prime_fpriv->handles); - return dev->driver->gem_prime_pin(obj); + return 0; } -static void drm_gem_map_detach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) +static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle) { - struct drm_prime_attachment *prime_attach = attach->priv; - struct drm_gem_object *obj = dma_buf->priv; - struct drm_device *dev = obj->dev; - struct sg_table *sgt; - - if (dev->driver->gem_prime_unpin) - dev->driver->gem_prime_unpin(obj); - - if (!prime_attach) - return; - - sgt = prime_attach->sgt; - if (sgt) { - if (prime_attach->dir != DMA_NONE) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, - prime_attach->dir); - sg_free_table(sgt); + struct rb_node *rb; + + rb = prime_fpriv->handles.rb_node; + while (rb) { + struct drm_prime_member *member; + + member = 
rb_entry(rb, struct drm_prime_member, handle_rb); + if (member->handle == handle) + return member->dma_buf; + else if (member->handle < handle) + rb = rb->rb_right; + else + rb = rb->rb_left; } - kfree(sgt); - kfree(prime_attach); - attach->priv = NULL; + return NULL; } -static void drm_prime_remove_buf_handle_locked( - struct drm_prime_file_private *prime_fpriv, - struct dma_buf *dma_buf) +static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf, + uint32_t *handle) { - struct drm_prime_member *member, *safe; + struct rb_node *rb; - list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { + rb = prime_fpriv->dmabufs.rb_node; + while (rb) { + struct drm_prime_member *member; + + member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (member->dma_buf == dma_buf) { - dma_buf_put(dma_buf); - list_del(&member->entry); - kfree(member); + *handle = member->handle; + return 0; + } else if (member->dma_buf < dma_buf) { + rb = rb->rb_right; + } else { + rb = rb->rb_left; } } + + return -ENOENT; } -static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, - enum dma_data_direction dir) +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, + uint32_t handle) { - struct drm_prime_attachment *prime_attach = attach->priv; - struct drm_gem_object *obj = attach->dmabuf->priv; - struct sg_table *sgt; - - if (WARN_ON(dir == DMA_NONE || !prime_attach)) - return ERR_PTR(-EINVAL); - - /* return the cached mapping when possible */ - if (prime_attach->dir == dir) - return prime_attach->sgt; + struct rb_node *rb; - /* - * two mappings with different directions for the same attachment are - * not allowed - */ - if (WARN_ON(prime_attach->dir != DMA_NONE)) - return ERR_PTR(-EBUSY); + rb = prime_fpriv->handles.rb_node; + while (rb) { + struct drm_prime_member *member; - mutex_lock(&obj->dev->struct_mutex); + member = rb_entry(rb, struct drm_prime_member, handle_rb); + if (member->handle == handle) { + rb_erase(&member->handle_rb, &prime_fpriv->handles); + rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); - sgt = obj->dev->driver->gem_prime_get_sg_table(obj); - - if (!IS_ERR(sgt)) { - if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) { - sg_free_table(sgt); - kfree(sgt); - sgt = ERR_PTR(-ENOMEM); + dma_buf_put(member->dma_buf); + kfree(member); + break; + } else if (member->handle < handle) { + rb = rb->rb_right; } else { - prime_attach->sgt = sgt; - prime_attach->dir = dir; + rb = rb->rb_left; } } - - mutex_unlock(&obj->dev->struct_mutex); - return sgt; } -static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sgt, enum dma_data_direction dir) +void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) { - /* nothing to be done here */ + mutex_init(&prime_fpriv->lock); + prime_fpriv->dmabufs = RB_ROOT; + prime_fpriv->handles = RB_ROOT; } -static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) +void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) { - struct drm_gem_object *obj = dma_buf->priv; - - if (obj->export_dma_buf == dma_buf) { - /* drop the reference on the export fd holds */ - obj->export_dma_buf = NULL; - drm_gem_object_unreference_unlocked(obj); - } + /* by now drm_gem_release should've made sure the list is empty */ + WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs)); } -static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) +/** + * drm_gem_dmabuf_export - &dma_buf export implementation for GEM + * 
@dev: parent device for the exported dmabuf + * @exp_info: the export information used by dma_buf_export() + * + * This wraps dma_buf_export() for use by generic GEM drivers that are using + * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take + * a reference to the &drm_device and the exported &drm_gem_object (stored in + * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release(). + * + * Returns the new dmabuf. + */ +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info) { - struct drm_gem_object *obj = dma_buf->priv; - struct drm_device *dev = obj->dev; + struct drm_gem_object *obj = exp_info->priv; + struct dma_buf *dma_buf; - return dev->driver->gem_prime_vmap(obj); + dma_buf = dma_buf_export(exp_info); + if (IS_ERR(dma_buf)) + return dma_buf; + + drm_dev_get(dev); + drm_gem_object_get(obj); + dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping; + + return dma_buf; } +EXPORT_SYMBOL(drm_gem_dmabuf_export); -static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +/** + * drm_gem_dmabuf_release - &dma_buf release implementation for GEM + * @dma_buf: buffer to be released + * + * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers + * must use this in their &dma_buf_ops structure as the release callback. + * drm_gem_dmabuf_release() should be used in conjunction with + * drm_gem_dmabuf_export(). + */ +void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; - dev->driver->gem_prime_vunmap(obj, vaddr); -} + /* drop the reference on the export fd holds */ + drm_gem_object_put(obj); -static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, - unsigned long page_num) -{ - return NULL; + drm_dev_put(dev); } +EXPORT_SYMBOL(drm_gem_dmabuf_release); -static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, - unsigned long page_num, void *addr) +/** + * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers + * @dev: drm_device to import into + * @file_priv: drm file-private structure + * @prime_fd: fd id of the dma-buf which should be imported + * @handle: pointer to storage for the handle of the imported buffer object + * + * This is the PRIME import function which must be used mandatorily by GEM + * drivers to ensure correct lifetime management of the underlying GEM object. + * The actual importing of GEM object from the dma-buf is done through the + * &drm_driver.gem_prime_import driver callback. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, + uint32_t *handle) { + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + int ret; + dma_buf = dma_buf_get(prime_fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + mutex_lock(&file_priv->prime.lock); + + ret = drm_prime_lookup_buf_handle(&file_priv->prime, + dma_buf, handle); + if (ret == 0) + goto out_put; + + /* never seen this one, need to import */ + mutex_lock(&dev->object_name_lock); + if (dev->driver->gem_prime_import) + obj = dev->driver->gem_prime_import(dev, dma_buf); + else + obj = drm_gem_prime_import(dev, dma_buf); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto out_unlock; + } + + if (obj->dma_buf) { + WARN_ON(obj->dma_buf != dma_buf); + } else { + obj->dma_buf = dma_buf; + get_dma_buf(dma_buf); + } + + /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ + ret = drm_gem_handle_create_tail(file_priv, obj, handle); + drm_gem_object_put(obj); + if (ret) + goto out_put; + + ret = drm_prime_add_buf_handle(&file_priv->prime, + dma_buf, *handle); + mutex_unlock(&file_priv->prime.lock); + if (ret) + goto fail; + + dma_buf_put(dma_buf); + + return 0; + +fail: + /* hmm, if driver attached, we are relying on the free-object path + * to detach.. which seems ok.. + */ + drm_gem_handle_delete(file_priv, *handle); + dma_buf_put(dma_buf); + return ret; + +out_unlock: + mutex_unlock(&dev->object_name_lock); +out_put: + mutex_unlock(&file_priv->prime.lock); + dma_buf_put(dma_buf); + return ret; } -static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, - unsigned long page_num) -{ - return NULL; -} +EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); -static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, - unsigned long page_num, void *addr) +int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { + struct drm_prime_handle *args = data; + + if (dev->driver->prime_fd_to_handle) { + return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, + &args->handle); + } + return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } -static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, - struct vm_area_struct *vma) +static struct dma_buf *export_and_register_object(struct drm_device *dev, + struct drm_gem_object *obj, + uint32_t flags) { - struct drm_gem_object *obj = dma_buf->priv; - struct drm_device *dev = obj->dev; + struct dma_buf *dmabuf; - if (!dev->driver->gem_prime_mmap) - return -ENOSYS; + /* prevent races with concurrent gem_close. 
*/ + if (obj->handle_count == 0) { + dmabuf = ERR_PTR(-ENOENT); + return dmabuf; + } - return dev->driver->gem_prime_mmap(obj, vma); -} + if (obj->funcs && obj->funcs->export) + dmabuf = obj->funcs->export(obj, flags); + else + dmabuf = drm_gem_prime_export(obj, flags); + if (IS_ERR(dmabuf)) { + /* normally the created dma-buf takes ownership of the ref, + * but if that fails then drop the ref + */ + return dmabuf; + } -static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { - .attach = drm_gem_map_attach, - .detach = drm_gem_map_detach, - .map_dma_buf = drm_gem_map_dma_buf, - .unmap_dma_buf = drm_gem_unmap_dma_buf, - .release = drm_gem_dmabuf_release, - .kmap = drm_gem_dmabuf_kmap, - .kmap_atomic = drm_gem_dmabuf_kmap_atomic, - .kunmap = drm_gem_dmabuf_kunmap, - .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic, - .mmap = drm_gem_dmabuf_mmap, - .vmap = drm_gem_dmabuf_vmap, - .vunmap = drm_gem_dmabuf_vunmap, -}; + /* + * Note that callers do not need to clean up the export cache + * since the check for obj->handle_count guarantees that someone + * will clean it up. + */ + obj->dma_buf = dmabuf; + get_dma_buf(obj->dma_buf); + + return dmabuf; +} /** - * DOC: PRIME Helpers - * - * Drivers can implement @gem_prime_export and @gem_prime_import in terms of - * simpler APIs by using the helper functions @drm_gem_prime_export and - * @drm_gem_prime_import. These functions implement dma-buf support in terms of - * five lower-level driver callbacks: - * - * Export callbacks: - * - * - @gem_prime_pin (optional): prepare a GEM object for exporting - * - * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages - * - * - @gem_prime_vmap: vmap a buffer exported by your driver - * - * - @gem_prime_vunmap: vunmap a buffer exported by your driver + * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers + * @dev: dev to export the buffer from + * @file_priv: drm file-private structure + * @handle: buffer handle to export + * @flags: flags like DRM_CLOEXEC * - * Import callback: + * This is the PRIME export function which must be used mandatorily by GEM + * drivers to ensure correct lifetime management of the underlying GEM object. + * The actual exporting from GEM object to a dma-buf is done through the + * &drm_gem_object_funcs.export callback. * - * - @gem_prime_import_sg_table (import): produce a GEM object from another - * driver's scatter/gather table + * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it + * has created, without attaching it to any file descriptors. The difference + * between those two is similar to that between anon_inode_getfile() and + * anon_inode_getfd(); insertion into descriptor table is something you + * can not revert if any cleanup is needed, so the descriptor-returning + * variants should only be used when you are past the last failure exit + * and the only thing left is passing the new file descriptor to userland. + * When all you need is the object itself or when you need to do something + * else that might fail, use that one instead. 
*/ - -struct dma_buf *drm_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags) -{ - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); -} -EXPORT_SYMBOL(drm_gem_prime_export); - -int drm_gem_prime_handle_to_fd(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, uint32_t flags, - int *prime_fd) +struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags) { struct drm_gem_object *obj; - void *buf; int ret = 0; struct dma_buf *dmabuf; - obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) - return -ENOENT; - mutex_lock(&file_priv->prime.lock); + obj = drm_gem_object_lookup(file_priv, handle); + if (!obj) { + dmabuf = ERR_PTR(-ENOENT); + goto out_unlock; + } + + dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); + if (dmabuf) { + get_dma_buf(dmabuf); + goto out; + } + + mutex_lock(&dev->object_name_lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; + get_dma_buf(dmabuf); goto out_have_obj; } - if (obj->export_dma_buf) { - dmabuf = obj->export_dma_buf; + if (obj->dma_buf) { + get_dma_buf(obj->dma_buf); + dmabuf = obj->dma_buf; goto out_have_obj; } - buf = dev->driver->gem_prime_export(dev, obj, flags); - if (IS_ERR(buf)) { + dmabuf = export_and_register_object(dev, obj, flags); + if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ - ret = PTR_ERR(buf); + mutex_unlock(&dev->object_name_lock); goto out; } - obj->export_dma_buf = buf; - /* if we've exported this buffer the cheat and add it to the import list - * so we get the correct handle back +out_have_obj: + /* + * If we've exported this buffer then cheat and add it to the import list + * so we get the correct handle back. We must do this under the + * protection of dev->object_name_lock to ensure that a racing gem close + * ioctl doesn't miss to remove this buffer handle from the cache. */ ret = drm_prime_add_buf_handle(&file_priv->prime, - obj->export_dma_buf, handle); - if (ret) - goto fail_put_dmabuf; + dmabuf, handle); + mutex_unlock(&dev->object_name_lock); + if (ret) { + dma_buf_put(dmabuf); + dmabuf = ERR_PTR(ret); + } +out: + drm_gem_object_put(obj); +out_unlock: + mutex_unlock(&file_priv->prime.lock); + return dmabuf; +} +EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf); + +/** + * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers + * @dev: dev to export the buffer from + * @file_priv: drm file-private structure + * @handle: buffer handle to export + * @flags: flags like DRM_CLOEXEC + * @prime_fd: pointer to storage for the fd id of the create dma-buf + * + * This is the PRIME export function which must be used mandatorily by GEM + * drivers to ensure correct lifetime management of the underlying GEM object. + * The actual exporting from GEM object to a dma-buf is done through the + * &drm_gem_object_funcs.export callback. 
+ */ +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, + int *prime_fd) +{ + struct dma_buf *dmabuf; + int fd = get_unused_fd_flags(flags); - ret = dma_buf_fd(buf, flags); - if (ret < 0) - goto fail_rm_handle; + if (fd < 0) + return fd; - *prime_fd = ret; - mutex_unlock(&file_priv->prime.lock); + dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags); + if (IS_ERR(dmabuf)) { + put_unused_fd(fd); + return PTR_ERR(dmabuf); + } + + fd_install(fd, dmabuf->file); + *prime_fd = fd; return 0; +} +EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); -out_have_obj: - get_dma_buf(dmabuf); - ret = dma_buf_fd(dmabuf, flags); - if (ret < 0) { - dma_buf_put(dmabuf); - } else { - *prime_fd = ret; - ret = 0; +int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_prime_handle *args = data; + + /* check flags are valid */ + if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) + return -EINVAL; + + if (dev->driver->prime_handle_to_fd) { + return dev->driver->prime_handle_to_fd(dev, file_priv, + args->handle, args->flags, + &args->fd); } + return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle, + args->flags, &args->fd); +} - goto out; +/** + * DOC: PRIME Helpers + * + * Drivers can implement &drm_gem_object_funcs.export and + * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper + * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions + * implement dma-buf support in terms of some lower-level helpers, which are + * again exported for drivers to use individually: + * + * Exporting buffers + * ~~~~~~~~~~~~~~~~~ + * + * Optional pinning of buffers is handled at dma-buf attach and detach time in + * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is + * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on + * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is + * unimplemented, exports into another device are rejected. + * + * For kernel-internal access there's drm_gem_dmabuf_vmap() and + * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by + * drm_gem_dmabuf_mmap(). + * + * Note that these export helpers can only be used if the underlying backing + * storage is fully coherent and either permanently pinned, or it is safe to pin + * it indefinitely. + * + * FIXME: The underlying helper functions are named rather inconsistently. + * + * Importing buffers + * ~~~~~~~~~~~~~~~~~ + * + * Importing dma-bufs using drm_gem_prime_import() relies on + * &drm_driver.gem_prime_import_sg_table. + * + * Note that similarly to the export helpers this permanently pins the + * underlying backing storage. Which is ok for scanout, but is not the best + * option for sharing lots of buffers for rendering. + */ + +/** + * drm_gem_map_attach - dma_buf attach implementation for GEM + * @dma_buf: buffer to attach device to + * @attach: buffer attachment data + * + * Calls &drm_gem_object_funcs.pin for device specific handling. This can be + * used as the &dma_buf_ops.attach callback. Must be used together with + * drm_gem_map_detach(). + * + * Returns 0 on success, negative error code on failure. + */ +int drm_gem_map_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dma_buf->priv; + int ret; + + /* + * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers + * that implement their own ->map_dma_buf() do not. 
+ */ + if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf && + !obj->funcs->get_sg_table) + return -ENOSYS; + + if (!obj->funcs->pin) + return 0; + + ret = dma_resv_lock(obj->resv, NULL); + if (ret) + return ret; + ret = obj->funcs->pin(obj); + dma_resv_unlock(obj->resv); -fail_rm_handle: - drm_prime_remove_buf_handle_locked(&file_priv->prime, buf); -fail_put_dmabuf: - /* clear NOT to be checked when releasing dma_buf */ - obj->export_dma_buf = NULL; - dma_buf_put(buf); -out: - drm_gem_object_unreference_unlocked(obj); - mutex_unlock(&file_priv->prime.lock); return ret; } -EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); +EXPORT_SYMBOL(drm_gem_map_attach); -struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) +/** + * drm_gem_map_detach - dma_buf detach implementation for GEM + * @dma_buf: buffer to detach from + * @attach: attachment to be detached + * + * Calls &drm_gem_object_funcs.pin for device specific handling. Cleans up + * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the + * &dma_buf_ops.detach callback. + */ +void drm_gem_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) { - struct dma_buf_attachment *attach; + struct drm_gem_object *obj = dma_buf->priv; + int ret; + + if (!obj->funcs->unpin) + return; + + ret = dma_resv_lock(obj->resv, NULL); + if (drm_WARN_ON(obj->dev, ret)) + return; + obj->funcs->unpin(obj); + dma_resv_unlock(obj->resv); +} +EXPORT_SYMBOL(drm_gem_map_detach); + +/** + * drm_gem_map_dma_buf - map_dma_buf implementation for GEM + * @attach: attachment whose scatterlist is to be returned + * @dir: direction of DMA transfer + * + * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This + * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together + * with drm_gem_unmap_dma_buf(). + * + * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR + * on error. May return -EINTR if it is interrupted by a signal. + */ +struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; - struct drm_gem_object *obj; int ret; - if (!dev->driver->gem_prime_import_sg_table) + if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); - if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) { - obj = dma_buf->priv; - if (obj->dev == dev) { - /* - * Importing dmabuf exported from out own gem increases - * refcount on gem itself instead of f_count of dmabuf. 
- */ - drm_gem_object_reference(obj); - return obj; - } - } - - attach = dma_buf_attach(dma_buf, dev->dev); - if (IS_ERR(attach)) - return ERR_CAST(attach); + if (WARN_ON(!obj->funcs->get_sg_table)) + return ERR_PTR(-ENOSYS); - get_dma_buf(dma_buf); + sgt = obj->funcs->get_sg_table(obj); + if (IS_ERR(sgt)) + return sgt; - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR_OR_NULL(sgt)) { - ret = PTR_ERR(sgt); - goto fail_detach; + ret = dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (ret) { + sg_free_table(sgt); + kfree(sgt); + sgt = ERR_PTR(ret); } - obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt); - if (IS_ERR(obj)) { - ret = PTR_ERR(obj); - goto fail_unmap; - } + return sgt; +} +EXPORT_SYMBOL(drm_gem_map_dma_buf); - obj->import_attach = attach; +/** + * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM + * @attach: attachment to unmap buffer from + * @sgt: scatterlist info of the buffer to unmap + * @dir: direction of DMA transfer + * + * This can be used as the &dma_buf_ops.unmap_dma_buf callback. + */ +void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + if (!sgt) + return; - return obj; + dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + kfree(sgt); +} +EXPORT_SYMBOL(drm_gem_unmap_dma_buf); -fail_unmap: - dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); -fail_detach: - dma_buf_detach(dma_buf, attach); - dma_buf_put(dma_buf); +/** + * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM + * @dma_buf: buffer to be mapped + * @map: the virtual address of the buffer + * + * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap + * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. + * The kernel virtual address is returned in map. + * + * Returns 0 on success or a negative errno code otherwise. + */ +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) +{ + struct drm_gem_object *obj = dma_buf->priv; - return ERR_PTR(ret); + return drm_gem_vmap_locked(obj, map); } -EXPORT_SYMBOL(drm_gem_prime_import); +EXPORT_SYMBOL(drm_gem_dmabuf_vmap); -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, uint32_t *handle) +/** + * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM + * @dma_buf: buffer to be unmapped + * @map: the virtual address of the buffer + * + * Releases a kernel virtual mapping. This can be used as the + * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. + */ +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { - struct dma_buf *dma_buf; - struct drm_gem_object *obj; + struct drm_gem_object *obj = dma_buf->priv; + + drm_gem_vunmap_locked(obj, map); +} +EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); + +/** + * drm_gem_prime_mmap - PRIME mmap function for GEM drivers + * @obj: GEM object + * @vma: Virtual address range + * + * This function sets up a userspace mapping for PRIME exported buffers using + * the same codepath that is used for regular GEM buffer mapping on the DRM fd. + * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is + * called to set up the mapping. 
+ */ +int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_file *priv; + struct file *fil; int ret; - dma_buf = dma_buf_get(prime_fd); - if (IS_ERR(dma_buf)) - return PTR_ERR(dma_buf); + /* Add the fake offset */ + vma->vm_pgoff += drm_vma_node_start(&obj->vma_node); - mutex_lock(&file_priv->prime.lock); + if (obj->funcs && obj->funcs->mmap) { + vma->vm_ops = obj->funcs->vm_ops; - ret = drm_prime_lookup_buf_handle(&file_priv->prime, - dma_buf, handle); - if (!ret) { - ret = 0; - goto out_put; + drm_gem_object_get(obj); + ret = obj->funcs->mmap(obj, vma); + if (ret) { + drm_gem_object_put(obj); + return ret; + } + vma->vm_private_data = obj; + return 0; } - /* never seen this one, need to import */ - obj = dev->driver->gem_prime_import(dev, dma_buf); - if (IS_ERR(obj)) { - ret = PTR_ERR(obj); - goto out_put; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + fil = kzalloc(sizeof(*fil), GFP_KERNEL); + if (!priv || !fil) { + ret = -ENOMEM; + goto out; } - ret = drm_gem_handle_create(file_priv, obj, handle); - drm_gem_object_unreference_unlocked(obj); - if (ret) - goto out_put; + /* Used by drm_gem_mmap() to lookup the GEM object */ + priv->minor = obj->dev->primary; + fil->private_data = priv; - ret = drm_prime_add_buf_handle(&file_priv->prime, - dma_buf, *handle); + ret = drm_vma_node_allow(&obj->vma_node, priv); if (ret) - goto fail; - - mutex_unlock(&file_priv->prime.lock); + goto out; - dma_buf_put(dma_buf); + ret = obj->dev->driver->fops->mmap(fil, vma); - return 0; + drm_vma_node_revoke(&obj->vma_node, priv); +out: + kfree(priv); + kfree(fil); -fail: - /* hmm, if driver attached, we are relying on the free-object path - * to detach.. which seems ok.. - */ - drm_gem_object_handle_unreference_unlocked(obj); -out_put: - dma_buf_put(dma_buf); - mutex_unlock(&file_priv->prime.lock); return ret; } -EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); +EXPORT_SYMBOL(drm_gem_prime_mmap); -int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +/** + * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM + * @dma_buf: buffer to be mapped + * @vma: virtual address range + * + * Provides memory mapping for the buffer. This can be used as the + * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap(). + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { - struct drm_prime_handle *args = data; - uint32_t flags; + struct drm_gem_object *obj = dma_buf->priv; - if (!drm_core_check_feature(dev, DRIVER_PRIME)) - return -EINVAL; + return drm_gem_prime_mmap(obj, vma); +} +EXPORT_SYMBOL(drm_gem_dmabuf_mmap); - if (!dev->driver->prime_handle_to_fd) - return -ENOSYS; +static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { + .attach = drm_gem_map_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .mmap = drm_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; - /* check flags are valid */ - if (args->flags & ~DRM_CLOEXEC) - return -EINVAL; +/** + * drm_prime_pages_to_sg - converts a page array into an sg list + * @dev: DRM device + * @pages: pointer to the array of page pointers to convert + * @nr_pages: length of the page vector + * + * This helper creates an sg table object from a set of pages + * the driver is responsible for mapping the pages into the + * importers address space for use with dma_buf itself. + * + * This is useful for implementing &drm_gem_object_funcs.get_sg_table. + */ +struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages) +{ + struct sg_table *sg; + size_t max_segment = 0; + int err; - /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */ - flags = args->flags & DRM_CLOEXEC; + sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sg) + return ERR_PTR(-ENOMEM); + + if (dev) + max_segment = dma_max_mapping_size(dev->dev); + if (max_segment == 0) + max_segment = UINT_MAX; + err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, + (unsigned long)nr_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); + if (err) { + kfree(sg); + sg = ERR_PTR(err); + } + return sg; +} +EXPORT_SYMBOL(drm_prime_pages_to_sg); - return dev->driver->prime_handle_to_fd(dev, file_priv, - args->handle, flags, &args->fd); +/** + * drm_prime_get_contiguous_size - returns the contiguous size of the buffer + * @sgt: sg_table describing the buffer to check + * + * This helper calculates the contiguous size in the DMA address space + * of the buffer described by the provided sg_table. + * + * This is useful for implementing + * &drm_gem_object_funcs.gem_prime_import_sg_table. + */ +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) +{ + dma_addr_t expected = sg_dma_address(sgt->sgl); + struct scatterlist *sg; + unsigned long size = 0; + int i; + + for_each_sgtable_dma_sg(sgt, sg, i) { + unsigned int len = sg_dma_len(sg); + + if (!len) + break; + if (sg_dma_address(sg) != expected) + break; + expected += len; + size += len; + } + return size; } +EXPORT_SYMBOL(drm_prime_get_contiguous_size); -int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +/** + * drm_gem_prime_export - helper library implementation of the export callback + * @obj: GEM object to export + * @flags: flags like DRM_CLOEXEC and DRM_RDWR + * + * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers + * using the PRIME helpers. It is used as the default in + * drm_gem_prime_handle_to_fd(). 
+ */ +struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, + int flags) { - struct drm_prime_handle *args = data; + struct drm_device *dev = obj->dev; + struct dma_buf_export_info exp_info = { + .exp_name = KBUILD_MODNAME, /* white lie for debug */ + .owner = dev->driver->fops->owner, + .ops = &drm_gem_prime_dmabuf_ops, + .size = obj->size, + .flags = flags, + .priv = obj, + .resv = obj->resv, + }; + + return drm_gem_dmabuf_export(dev, &exp_info); +} +EXPORT_SYMBOL(drm_gem_prime_export); - if (!drm_core_check_feature(dev, DRIVER_PRIME)) - return -EINVAL; - if (!dev->driver->prime_fd_to_handle) - return -ENOSYS; +/** + * drm_gem_is_prime_exported_dma_buf - + * checks if the DMA-BUF was exported from a GEM object belonging to @dev. + * @dev: drm_device to check against + * @dma_buf: dma-buf object to import + * + * Return: true if the DMA-BUF was exported from a GEM object belonging + * to @dev, false otherwise. + */ + +bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct drm_gem_object *obj = dma_buf->priv; - return dev->driver->prime_fd_to_handle(dev, file_priv, - args->fd, &args->handle); + return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev); } +EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf); -/* - * drm_prime_pages_to_sg +/** + * drm_gem_prime_import_dev - core implementation of the import callback + * @dev: drm_device to import into + * @dma_buf: dma-buf object to import + * @attach_dev: struct device to dma_buf attach * - * this helper creates an sg table object from a set of pages - * the driver is responsible for mapping the pages into the - * importers address space + * This is the core of drm_gem_prime_import(). It's designed to be called by + * drivers who want to use a different device structure than &drm_device.dev for + * attaching via dma_buf. This function calls + * &drm_driver.gem_prime_import_sg_table internally. + * + * Drivers must arrange to call drm_prime_gem_destroy() from their + * &drm_gem_object_funcs.free hook when using this function. */ -struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) +struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, + struct dma_buf *dma_buf, + struct device *attach_dev) { - struct sg_table *sg = NULL; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct drm_gem_object *obj; int ret; - sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!sg) { - ret = -ENOMEM; - goto out; + if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) { + /* + * Importing dmabuf exported from our own gem increases + * refcount on gem itself instead of f_count of dmabuf. 
+ */ + obj = dma_buf->priv; + drm_gem_object_get(obj); + return obj; } - ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); - if (ret) - goto out; + if (!dev->driver->gem_prime_import_sg_table) + return ERR_PTR(-EINVAL); + + attach = dma_buf_attach(dma_buf, attach_dev); + if (IS_ERR(attach)) + return ERR_CAST(attach); + + get_dma_buf(dma_buf); + + sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto fail_detach; + } + + obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto fail_unmap; + } + + obj->import_attach = attach; + obj->resv = dma_buf->resv; + + return obj; + +fail_unmap: + dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); +fail_detach: + dma_buf_detach(dma_buf, attach); + dma_buf_put(dma_buf); - return sg; -out: - kfree(sg); return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_prime_pages_to_sg); +EXPORT_SYMBOL(drm_gem_prime_import_dev); -/* export an sg table into an array of pages and addresses - this is currently required by the TTM driver in order to do correct fault - handling */ -int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, - dma_addr_t *addrs, int max_pages) +/** + * drm_gem_prime_import - helper library implementation of the import callback + * @dev: drm_device to import into + * @dma_buf: dma-buf object to import + * + * This is the implementation of the gem_prime_import functions for GEM drivers + * using the PRIME helpers. Drivers can use this as their + * &drm_driver.gem_prime_import implementation. It is used as the default + * implementation in drm_gem_prime_fd_to_handle(). + * + * Drivers must arrange to call drm_prime_gem_destroy() from their + * &drm_gem_object_funcs.free hook when using this function. + */ +struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) { - unsigned count; - struct scatterlist *sg; - struct page *page; - u32 len, offset; - int pg_index; - dma_addr_t addr; - - pg_index = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - len = sg->length; - offset = sg->offset; - page = sg_page(sg); - addr = sg_dma_address(sg); - - while (len > 0) { - if (WARN_ON(pg_index >= max_pages)) - return -1; - pages[pg_index] = page; - if (addrs) - addrs[pg_index] = addr; - - page++; - addr += PAGE_SIZE; - len -= PAGE_SIZE; - pg_index++; - } + return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev)); +} +EXPORT_SYMBOL(drm_gem_prime_import); + +/** + * drm_prime_sg_to_page_array - convert an sg table into a page array + * @sgt: scatter-gather table to convert + * @pages: array of page pointers to store the pages in + * @max_entries: size of the passed-in array + * + * Exports an sg table into an array of pages. + * + * This function is deprecated and strongly discouraged to be used. + * The page array is only useful for page faults and those can corrupt fields + * in the struct page if they are not handled by the exporting driver. 
+ */ +int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt, + struct page **pages, + int max_entries) +{ + struct sg_page_iter page_iter; + struct page **p = pages; + + for_each_sgtable_page(sgt, &page_iter, 0) { + if (WARN_ON(p - pages >= max_entries)) + return -1; + *p++ = sg_page_iter_page(&page_iter); } return 0; } -EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); -/* helper function to cleanup a GEM/prime object */ +EXPORT_SYMBOL(drm_prime_sg_to_page_array); + +/** + * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array + * @sgt: scatter-gather table to convert + * @addrs: array to store the dma bus address of each page + * @max_entries: size of both the passed-in arrays + * + * Exports an sg table into an array of addresses. + * + * Drivers should use this in their &drm_driver.gem_prime_import_sg_table + * implementation. + */ +int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs, + int max_entries) +{ + struct sg_dma_page_iter dma_iter; + dma_addr_t *a = addrs; + + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + if (WARN_ON(a - addrs >= max_entries)) + return -1; + *a++ = sg_page_iter_dma_address(&dma_iter); + } + return 0; +} +EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array); + +/** + * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object + * @obj: GEM object which was created from a dma-buf + * @sg: the sg-table which was pinned at import time + * + * This is the cleanup functions which GEM drivers need to call when they use + * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs. + */ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) { struct dma_buf_attachment *attach; struct dma_buf *dma_buf; + attach = obj->import_attach; if (sg) - dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); + dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL); dma_buf = attach->dmabuf; dma_buf_detach(attach->dmabuf, attach); /* remove the reference */ dma_buf_put(dma_buf); } EXPORT_SYMBOL(drm_prime_gem_destroy); - -void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) -{ - INIT_LIST_HEAD(&prime_fpriv->head); - mutex_init(&prime_fpriv->lock); -} -EXPORT_SYMBOL(drm_prime_init_file_private); - -void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) -{ - /* by now drm_gem_release should've made sure the list is empty */ - WARN_ON(!list_empty(&prime_fpriv->head)); -} -EXPORT_SYMBOL(drm_prime_destroy_file_private); - -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) -{ - struct drm_prime_member *member; - - list_for_each_entry(member, &prime_fpriv->head, entry) { - if (member->dma_buf == dma_buf) { - *handle = member->handle; - return 0; - } - } - return -ENOENT; -} -EXPORT_SYMBOL(drm_prime_lookup_buf_handle); - -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) -{ - mutex_lock(&prime_fpriv->lock); - drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf); - mutex_unlock(&prime_fpriv->lock); -} -EXPORT_SYMBOL(drm_prime_remove_buf_handle); |
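
The kernel-doc comments introduced by this patch spell out how a GEM driver is expected to plug into these helpers: provide &drm_gem_object_funcs.get_sg_table for exporting, &drm_driver.gem_prime_import_sg_table for importing, and call drm_prime_gem_destroy() from the free hook for imported objects. As a reading aid only (this is not part of the patch), here is a minimal hypothetical sketch of that wiring; the foo_* driver, its page-array backing store, and the stubbed-out import_sg_table body are all invented for illustration.

/*
 * Hypothetical driver glue, not part of this patch: a GEM driver whose
 * buffers are backed by a pinned page array and which otherwise leans
 * entirely on the PRIME helpers in drm_prime.c.
 */
#include <linux/err.h>
#include <linux/slab.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;		/* backing pages, pinned for the object's lifetime */
	unsigned int num_pages;
	struct sg_table *imported_sgt;	/* only set for PRIME-imported objects */
};

static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

	/* called by drm_gem_map_dma_buf() through obj->funcs->get_sg_table */
	return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->num_pages);
}

static void foo_gem_free(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

	if (obj->import_attach)
		/* undo what drm_gem_prime_import() set up at import time */
		drm_prime_gem_destroy(obj, bo->imported_sgt);
	drm_gem_object_release(obj);
	kfree(bo);
}

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free = foo_gem_free,
	.export = drm_gem_prime_export,		/* the default; shown here for clarity */
	.get_sg_table = foo_gem_get_sg_table,
};

static struct drm_gem_object *
foo_gem_prime_import_sg_table(struct drm_device *dev,
			      struct dma_buf_attachment *attach,
			      struct sg_table *sgt)
{
	/* a real driver would allocate a foo_gem_object wrapping @sgt here */
	return ERR_PTR(-ENODEV);	/* placeholder in this sketch */
}

static const struct drm_driver foo_drm_driver = {
	.driver_features = DRIVER_GEM,
	/* drm_gem_prime_import() is also the fallback when this is left NULL */
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
};

With glue along these lines, the core PRIME_HANDLE_TO_FD and PRIME_FD_TO_HANDLE ioctls resolve to drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle(), which in turn use the handle caches and export/import helpers defined in this file.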
