Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--  drivers/dma-buf/dma-buf.c  1179
1 file changed, 753 insertions(+), 426 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 01ce125f8e8d..edaa9e4ee4ae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -15,11 +15,15 @@
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
+#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
+#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
@@ -29,33 +33,154 @@
#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
+#include "dma-buf-sysfs-stats.h"
+
static inline int is_dma_buf_file(struct file *);
-struct dma_buf_list {
- struct list_head head;
- struct mutex lock;
-};
+static DEFINE_MUTEX(dmabuf_list_mutex);
+static LIST_HEAD(dmabuf_list);
+
+static void __dma_buf_list_add(struct dma_buf *dmabuf)
+{
+ mutex_lock(&dmabuf_list_mutex);
+ list_add(&dmabuf->list_node, &dmabuf_list);
+ mutex_unlock(&dmabuf_list_mutex);
+}
+
+static void __dma_buf_list_del(struct dma_buf *dmabuf)
+{
+ if (!dmabuf)
+ return;
+
+ mutex_lock(&dmabuf_list_mutex);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&dmabuf_list_mutex);
+}
+
+/**
+ * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
+ *
+ * Returns the first buffer in the global list of DMA-bufs that's not in the
+ * process of being destroyed. Increments that buffer's reference count to
+ * prevent buffer destruction. Callers must release the reference, either by
+ * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * First buffer from global list, with refcount elevated
+ * * NULL if no active buffers are present
+ */
+struct dma_buf *dma_buf_iter_begin(void)
+{
+ struct dma_buf *ret = NULL, *dmabuf;
-static struct dma_buf_list db_list;
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
+}
+
+/**
+ * dma_buf_iter_next - continue iteration through global list of all DMA buffers
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Decrements the reference count on the provided buffer. Returns the next
+ * buffer from the remainder of the global list of DMA-bufs with its reference
+ * count incremented. Callers must release the reference, either by continuing
+ * iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * Next buffer from global list, with refcount elevated
+ * * NULL if no additional active buffers are present
+ */
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
+{
+ struct dma_buf *ret = NULL;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ dma_buf_put(dmabuf);
+ list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
+}
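
A minimal sketch of a caller walking the global list with this iterator pair; the pr_info() body is only a stand-in for real per-buffer work and is not part of this patch:

	static void report_all_dmabufs(void)
	{
		struct dma_buf *dmabuf;

		for (dmabuf = dma_buf_iter_begin(); dmabuf;
		     dmabuf = dma_buf_iter_next(dmabuf))
			pr_info("dma-buf %s: %zu bytes\n",
				dmabuf->exp_name, dmabuf->size);
	}

A caller that breaks out of such a loop early still holds the elevated reference and must drop it with dma_buf_put().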
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
struct dma_buf *dmabuf;
char name[DMA_BUF_NAME_LEN];
- size_t ret = 0;
+ ssize_t ret = 0;
dmabuf = dentry->d_fsdata;
- dma_resv_lock(dmabuf->resv, NULL);
+ spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
- ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- dma_resv_unlock(dmabuf->resv);
+ ret = strscpy(name, dmabuf->name, sizeof(name));
+ spin_unlock(&dmabuf->name_lock);
- return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
+ return dynamic_dname(buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
}
+static void dma_buf_release(struct dentry *dentry)
+{
+ struct dma_buf *dmabuf;
+
+ dmabuf = dentry->d_fsdata;
+ if (unlikely(!dmabuf))
+ return;
+
+ BUG_ON(dmabuf->vmapping_counter);
+
+ /*
+ * If you hit this BUG() it could mean:
+ * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+ * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
+ */
+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
+
+ dma_buf_stats_teardown(dmabuf);
+ dmabuf->ops->release(dmabuf);
+
+ if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+ dma_resv_fini(dmabuf->resv);
+
+ WARN_ON(!list_empty(&dmabuf->attachments));
+ module_put(dmabuf->owner);
+ kfree(dmabuf->name);
+ kfree(dmabuf);
+}
+
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ __dma_buf_list_del(file->private_data);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
+ .d_release = dma_buf_release,
};
static struct vfsmount *dma_buf_mnt;
@@ -77,42 +202,6 @@ static struct file_system_type dma_buf_fs_type = {
.kill_sb = kill_anon_super,
};
-static int dma_buf_release(struct inode *inode, struct file *file)
-{
- struct dma_buf *dmabuf;
-
- if (!is_dma_buf_file(file))
- return -EINVAL;
-
- dmabuf = file->private_data;
-
- BUG_ON(dmabuf->vmapping_counter);
-
- /*
- * Any fences that a dma-buf poll can wait on should be signaled
- * before releasing dma-buf. This is the responsibility of each
- * driver that uses the reservation objects.
- *
- * If you hit this BUG() it means someone dropped their ref to the
- * dma-buf while still having pending operation to the buffer.
- */
- BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
-
- dmabuf->ops->release(dmabuf);
-
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
- if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
- dma_resv_fini(dmabuf->resv);
-
- module_put(dmabuf->owner);
- kfree(dmabuf->name);
- kfree(dmabuf);
- return 0;
-}
-
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
@@ -145,8 +234,9 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
dmabuf = file->private_data;
/* only support discovering the end of the buffer,
- but also allow SEEK_SET to maintain the idiomatic
- SEEK_END(0), SEEK_CUR(0) pattern */
+ * but also allow SEEK_SET to maintain the idiomatic
+ * SEEK_END(0), SEEK_CUR(0) pattern.
+ */
if (whence == SEEK_END)
base = dmabuf->size;
else if (whence == SEEK_SET)
@@ -161,11 +251,11 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
}
/**
- * DOC: fence polling
+ * DOC: implicit fence polling
*
* To support cross-device and cross-driver synchronization of buffer access
- * implicit fences (represented internally in the kernel with &struct fence) can
- * be attached to a &dma_buf. The glue for that and a few related things are
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
* provided in the &dma_resv structure.
*
* Userspace can query the state of these implicitly tracked fences using poll()
@@ -180,27 +270,50 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
* Note that this only signals the completion of the respective fences, i.e. the
* DMA transfers are complete. Cache flushing and any other necessary
* preparations before CPU access can begin still need to happen.
+ *
+ * As an alternative to poll(), the set of fences on a DMA buffer can be
+ * exported as a &sync_file using &dma_buf_sync_file_export.
*/
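
For reference, a minimal userspace sketch of the poll() interface described above. Per the cb_in/cb_out paths below, POLLIN fires once all outstanding writers have signalled (the buffer is safe to read), POLLOUT once all access has finished:

	#include <errno.h>
	#include <poll.h>

	/* Block until the buffer is safe to read. */
	static int wait_readable(int dmabuf_fd)
	{
		struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
		int ret;

		do {
			ret = poll(&pfd, 1, -1);
		} while (ret < 0 && errno == EINTR);

		return ret < 0 ? -errno : 0;
	}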
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+ struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
unsigned long flags;
spin_lock_irqsave(&dcb->poll->lock, flags);
wake_up_locked_poll(dcb->poll, dcb->active);
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
+ dma_fence_put(fence);
+ /* Paired with get_file in dma_buf_poll */
+ fput(dmabuf->file);
+}
+
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int r;
+
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+ fence) {
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+ }
+
+ return false;
}
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct dma_resv *resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence_excl;
__poll_t events;
- unsigned shared_count, seq;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
@@ -214,114 +327,66 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
if (!events)
return 0;
-retry:
- seq = read_seqcount_begin(&resv->seq);
- rcu_read_lock();
+ dma_resv_lock(resv, NULL);
- fobj = rcu_dereference(resv->fence);
- if (fobj)
- shared_count = fobj->shared_count;
- else
- shared_count = 0;
- fence_excl = rcu_dereference(resv->fence_excl);
- if (read_seqcount_retry(&resv->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
-
- if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- __poll_t pevents = EPOLLIN;
-
- if (shared_count == 0)
- pevents |= EPOLLOUT;
+ if (events & EPOLLOUT) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
- if (dcb->active) {
- dcb->active |= pevents;
- events &= ~pevents;
- } else
- dcb->active = pevents;
+ if (dcb->active)
+ events &= ~EPOLLOUT;
+ else
+ dcb->active = EPOLLOUT;
spin_unlock_irq(&dmabuf->poll.lock);
- if (events & pevents) {
- if (!dma_fence_get_rcu(fence_excl)) {
- /* force a recheck */
- events &= ~pevents;
- dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
- events &= ~pevents;
- dma_fence_put(fence_excl);
- } else {
- /*
- * No callback queued, wake up any additional
- * waiters.
- */
- dma_fence_put(fence_excl);
+ if (events & EPOLLOUT) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
+ if (!dma_buf_poll_add_cb(resv, true, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- }
+ else
+ events &= ~EPOLLOUT;
}
}
- if ((events & EPOLLOUT) && shared_count > 0) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
- int i;
+ if (events & EPOLLIN) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
- /* Only queue a new callback if no event has fired yet */
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
if (dcb->active)
- events &= ~EPOLLOUT;
+ events &= ~EPOLLIN;
else
- dcb->active = EPOLLOUT;
+ dcb->active = EPOLLIN;
spin_unlock_irq(&dmabuf->poll.lock);
- if (!(events & EPOLLOUT))
- goto out;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+ if (events & EPOLLIN) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
- if (!dma_fence_get_rcu(fence)) {
- /*
- * fence refcount dropped to zero, this means
- * that fobj has been freed
- *
- * call dma_buf_poll_cb and force a recheck!
- */
- events &= ~EPOLLOUT;
+ if (!dma_buf_poll_add_cb(resv, false, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- break;
- }
- if (!dma_fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- dma_fence_put(fence);
- events &= ~EPOLLOUT;
- break;
- }
- dma_fence_put(fence);
+ else
+ events &= ~EPOLLIN;
}
-
- /* No callback queued, wake up any additional waiters. */
- if (i == shared_count)
- dma_buf_poll_cb(NULL, &dcb->cb);
}
-out:
- rcu_read_unlock();
+ dma_resv_unlock(resv);
return events;
}
/**
* dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It could support changing the name of the dma-buf if the same
+ * piece of memory is used for multiple purposes between different devices.
*
- * @dmabuf [in] dmabuf buffer that will be renamed.
- * @buf: [in] A piece of userspace memory that contains the name of
- * the dma-buf.
+ * @dmabuf: [in] dmabuf buffer that will be renamed.
+ * @buf: [in] A piece of userspace memory that contains the name of
+ * the dma-buf.
*
- * Returns 0 on success. If the dma-buf buffer is already attached to
- * devices, return -EBUSY.
+ * Returns 0 on success.
@@ -330,24 +395,122 @@ out:
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
- long ret = 0;
if (IS_ERR(name))
return PTR_ERR(name);
- dma_resv_lock(dmabuf->resv, NULL);
- if (!list_empty(&dmabuf->attachments)) {
- ret = -EBUSY;
- kfree(name);
- goto out_unlock;
- }
+ spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
+ spin_unlock(&dmabuf->name_lock);
+
+ return 0;
+}
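
From userspace the rename is then a single ioctl on the dma-buf fd; a sketch, with an arbitrary name string (the kernel copies it with strndup_user()):

	ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, "camera:preview");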
+
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
+ void __user *user_data)
+{
+ struct dma_buf_export_sync_file arg;
+ enum dma_resv_usage usage;
+ struct dma_fence *fence = NULL;
+ struct sync_file *sync_file;
+ int fd, ret;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
+ ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
+ if (ret)
+ goto err_put_fd;
+
+ if (!fence)
+ fence = dma_fence_get_stub();
+
+ sync_file = sync_file_create(fence);
+
+ dma_fence_put(fence);
+
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ arg.fd = fd;
+ if (copy_to_user(user_data, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto err_put_file;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err_put_file:
+ fput(sync_file->file);
+err_put_fd:
+ put_unused_fd(fd);
+ return ret;
+}
+
+static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
+ const void __user *user_data)
+{
+ struct dma_buf_import_sync_file arg;
+ struct dma_fence *fence, *f;
+ enum dma_resv_usage usage;
+ struct dma_fence_unwrap iter;
+ unsigned int num_fences;
+ int ret = 0;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(arg.fd);
+ if (!fence)
+ return -EINVAL;
+
+ usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
+ DMA_RESV_USAGE_READ;
+
+ num_fences = 0;
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ ++num_fences;
+
+ if (num_fences > 0) {
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
+ if (!ret) {
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ dma_resv_add_fence(dmabuf->resv, f, usage);
+ }
+
+ dma_resv_unlock(dmabuf->resv);
+ }
+
+ dma_fence_put(fence);
-out_unlock:
- dma_resv_unlock(dmabuf->resv);
return ret;
}
+#endif
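
A userspace sketch of the export side under the flag semantics above; with DMA_BUF_SYNC_READ the resulting sync_file carries the fences a reader has to wait for, i.e. the outstanding writes:

	#include <errno.h>
	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>

	static int export_read_fences(int dmabuf_fd)
	{
		struct dma_buf_export_sync_file arg = {
			.flags = DMA_BUF_SYNC_READ,
		};

		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) < 0)
			return -errno;
		return arg.fd;	/* sync_file fd, close() when done */
	}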
static long dma_buf_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
@@ -392,6 +555,13 @@ static long dma_buf_ioctl(struct file *file,
case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg);
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+ case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
+ return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
+ case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
+ return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
+#endif
+
default:
return -ENOTTY;
}
@@ -405,14 +575,14 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- dma_resv_lock(dmabuf->resv, NULL);
+ spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- dma_resv_unlock(dmabuf->resv);
+ spin_unlock(&dmabuf->name_lock);
}
static const struct file_operations dma_buf_fops = {
- .release = dma_buf_release,
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -429,24 +599,30 @@ static inline int is_dma_buf_file(struct file *file)
return file->f_op == &dma_buf_fops;
}
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
{
- struct file *file;
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+ struct file *file;
if (IS_ERR(inode))
return ERR_CAST(inode);
- inode->i_size = dmabuf->size;
- inode_set_bytes(inode, dmabuf->size);
+ inode->i_size = size;
+ inode_set_bytes(inode, size);
+ /*
+ * The ->i_ino acquired from get_next_ino() is not unique and thus
+ * not suitable for use as the dentry name by dmabuf stats.
+ * Override ->i_ino with a unique, dmabuffs-specific value.
+ */
+ inode->i_ino = atomic64_inc_return(&dmabuf_inode);
+ flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
- file->private_data = dmabuf;
- file->f_path.dentry->d_fsdata = dmabuf;
return file;
@@ -467,7 +643,7 @@ err_alloc_file:
* as a file descriptor by calling dma_buf_fd().
*
* 2. Userspace passes this file descriptor to all drivers it wants this buffer
- * to share with: First the filedescriptor is converted to a &dma_buf using
+ * to share with: First the file descriptor is converted to a &dma_buf using
* dma_buf_get(). Then the buffer is attached to the device using
* dma_buf_attach().
*
@@ -480,7 +656,7 @@ err_alloc_file:
*
* 4. Once a driver is done with a shared buffer it needs to call
* dma_buf_detach() (after cleaning up any mappings) and then release the
- * reference acquired with dma_buf_get by calling dma_buf_put().
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
*
* For the detailed semantics exporters are expected to implement see
* &dma_buf_ops.
@@ -496,9 +672,10 @@ err_alloc_file:
* by the exporter. see &struct dma_buf_export_info
* for further details.
*
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. On either
+ * missing ops, or error in allocating struct dma_buf, will return negative
+ * error.
*
* For most cases the easiest way to create @exp_info is through the
* %DEFINE_DMA_BUF_EXPORT_INFO macro.
@@ -511,22 +688,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
size_t alloc_size = sizeof(struct dma_buf);
int ret;
- if (!exp_info->resv)
- alloc_size += sizeof(struct dma_resv);
- else
- /* prevent &dma_buf[1] == dma_buf->resv */
- alloc_size += 1;
-
- if (WARN_ON(!exp_info->priv
- || !exp_info->ops
- || !exp_info->ops->map_dma_buf
- || !exp_info->ops->unmap_dma_buf
- || !exp_info->ops->release)) {
- return ERR_PTR(-EINVAL);
- }
-
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
+ if (WARN_ON(!exp_info->priv || !exp_info->ops
+ || !exp_info->ops->map_dma_buf
+ || !exp_info->ops->unmap_dma_buf
+ || !exp_info->ops->release))
return ERR_PTR(-EINVAL);
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
@@ -535,10 +700,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
+ file = dma_buf_getfile(exp_info->size, exp_info->flags);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_module;
+ }
+
+ if (!exp_info->resv)
+ alloc_size += sizeof(struct dma_resv);
+ else
+ /* prevent &dma_buf[1] == dma_buf->resv */
+ alloc_size += 1;
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
ret = -ENOMEM;
- goto err_module;
+ goto err_file;
}
dmabuf->priv = exp_info->priv;
@@ -546,44 +722,45 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
dmabuf->owner = exp_info->owner;
+ spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
- dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
- dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+ INIT_LIST_HEAD(&dmabuf->attachments);
if (!resv) {
- resv = (struct dma_resv *)&dmabuf[1];
- dma_resv_init(resv);
+ dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+ dma_resv_init(dmabuf->resv);
+ } else {
+ dmabuf->resv = resv;
}
- dmabuf->resv = resv;
- file = dma_buf_getfile(dmabuf, exp_info->flags);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
+ ret = dma_buf_stats_setup(dmabuf, file);
+ if (ret)
goto err_dmabuf;
- }
- file->f_mode |= FMODE_LSEEK;
+ file->private_data = dmabuf;
+ file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- mutex_init(&dmabuf->lock);
- INIT_LIST_HEAD(&dmabuf->attachments);
-
- mutex_lock(&db_list.lock);
- list_add(&dmabuf->list_node, &db_list.head);
- mutex_unlock(&db_list.lock);
+ __dma_buf_list_add(dmabuf);
return dmabuf;
err_dmabuf:
+ if (!resv)
+ dma_resv_fini(dmabuf->resv);
kfree(dmabuf);
+err_file:
+ fput(file);
err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_export);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
* @flags: [in] flags to give to fd
*
@@ -591,26 +768,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
*/
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
- int fd;
-
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- fd = get_unused_fd_flags(flags);
- if (fd < 0)
- return fd;
-
- fd_install(fd, dmabuf->file);
-
- return fd;
+ return FD_ADD(flags, dmabuf->file);
}
-EXPORT_SYMBOL_GPL(dma_buf_fd);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
*
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
* file's refcounting done by fget to increase refcount. returns ERR_PTR
* otherwise.
*/
@@ -630,7 +799,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_GPL(dma_buf_get);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");
/**
* dma_buf_put - decreases refcount of the buffer
@@ -649,11 +818,105 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_GPL(dma_buf_put);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
+
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+ int i;
+ struct scatterlist *sg;
+
+ /* To catch abuse of the underlying struct page by importers, mix
+ * up the bits, but take care to preserve the low SG_ bits to
+ * not corrupt the sgt. The mixing is undone on unmap
+ * before passing the sgt back to the exporter.
+ */
+ for_each_sgtable_sg(sg_table, sg, i)
+ sg->page_link ^= ~0xffUL;
+#endif
+
+}
+
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
+
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
+}
+
+/**
+ * DOC: locking convention
+ *
+ * In order to avoid deadlock situations between dma-buf exports and importers,
+ * all dma-buf API users must follow the common dma-buf locking convention.
+ *
+ * Convention for importers
+ *
+ * 1. Importers must hold the dma-buf reservation lock when calling these
+ * functions:
+ *
+ * - dma_buf_pin()
+ * - dma_buf_unpin()
+ * - dma_buf_map_attachment()
+ * - dma_buf_unmap_attachment()
+ * - dma_buf_vmap()
+ * - dma_buf_vunmap()
+ *
+ * 2. Importers must not hold the dma-buf reservation lock when calling these
+ * functions:
+ *
+ * - dma_buf_attach()
+ * - dma_buf_dynamic_attach()
+ * - dma_buf_detach()
+ * - dma_buf_export()
+ * - dma_buf_fd()
+ * - dma_buf_get()
+ * - dma_buf_put()
+ * - dma_buf_mmap()
+ * - dma_buf_begin_cpu_access()
+ * - dma_buf_end_cpu_access()
+ * - dma_buf_map_attachment_unlocked()
+ * - dma_buf_unmap_attachment_unlocked()
+ * - dma_buf_vmap_unlocked()
+ * - dma_buf_vunmap_unlocked()
+ *
+ * Convention for exporters
+ *
+ * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
+ * reservation and exporter can take the lock:
+ *
+ * - &dma_buf_ops.attach()
+ * - &dma_buf_ops.detach()
+ * - &dma_buf_ops.release()
+ * - &dma_buf_ops.begin_cpu_access()
+ * - &dma_buf_ops.end_cpu_access()
+ * - &dma_buf_ops.mmap()
+ *
+ * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
+ * reservation and exporter can't take the lock:
+ *
+ * - &dma_buf_ops.pin()
+ * - &dma_buf_ops.unpin()
+ * - &dma_buf_ops.map_dma_buf()
+ * - &dma_buf_ops.unmap_dma_buf()
+ * - &dma_buf_ops.vmap()
+ * - &dma_buf_ops.vunmap()
+ *
+ * 3. Exporters must hold the dma-buf reservation lock when calling these
+ * functions:
+ *
+ * - dma_buf_move_notify()
+ */
/**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @importer_ops: [in] importer operations for the attachment
@@ -662,6 +925,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
*
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
* Returns:
*
* A pointer to newly created &dma_buf_attachment on success, or a negative
@@ -705,52 +971,13 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_resv_lock(attach->dmabuf->resv, NULL);
- ret = dma_buf_pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_buf_unpin(attach);
-
-err_unlock:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -765,48 +992,44 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
{
return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
-EXPORT_SYMBOL_GPL(dma_buf_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
/**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
* Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
- if (WARN_ON(!dmabuf || !attach))
+ if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
return;
- if (attach->sgt) {
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_lock(attach->dmabuf->resv, NULL);
-
- dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_buf_unpin(attach);
- dma_resv_unlock(attach->dmabuf->resv);
- }
- }
-
dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
dma_resv_unlock(dmabuf->resv);
+
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
kfree(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_detach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");
/**
* dma_buf_pin - Lock down the DMA-buf
- *
* @attach: [in] attachment which should be pinned
*
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
* Returns:
* 0 on success, negative error code on failure.
*/
@@ -815,6 +1038,8 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
+ WARN_ON(!attach->importer_ops);
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->pin)
@@ -822,23 +1047,28 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_pin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
/**
- * dma_buf_unpin - Remove lock from DMA-buf
- *
+ * dma_buf_unpin - Unpin a DMA-buf
* @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
*/
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
+ WARN_ON(!attach->importer_ops);
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->unpin)
dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_unpin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -850,62 +1080,117 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
* A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
* time.
+ *
+ * Important: Dynamic importers must wait for the exclusive fence of the struct
+ * dma_resv attached to the DMA-BUF first.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
- if (dma_buf_attachment_is_dynamic(attach))
- dma_resv_assert_held(attach->dmabuf->resv);
+ dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+ * Catch exporters making buffers inaccessible even when
+ * attachments preventing that exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_resv_assert_held(attach->dmabuf->resv);
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = dma_buf_pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+ WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
+
+ /*
+ * Importers with static attachments don't wait for fences.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
+ }
+ mangle_sg_table(sg_table);
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+#ifdef CONFIG_DMA_API_DEBUG
+ {
+ struct scatterlist *sg;
+ u64 addr;
+ int len;
+ int i;
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ for_each_sgtable_dma_sg(sg_table, sg, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+ pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+ __func__, addr, len);
+ }
+ }
}
+#endif /* CONFIG_DMA_API_DEBUG */
+ return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
return sg_table;
}
-EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
+
+/**
+ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_map_attachment().
+ */
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf))
+ return ERR_PTR(-EINVAL);
+
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ sg_table = dma_buf_map_attachment(attach, direction);
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
@@ -926,29 +1211,47 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
- if (dma_buf_attachment_is_dynamic(attach))
- dma_resv_assert_held(attach->dmabuf->resv);
+ dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_assert_held(attach->dmabuf->resv);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+/**
+ * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;
+ * might deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ * @direction: [in] direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_unmap_attachment().
+ */
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ might_sleep();
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+ return;
+
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, sg_table, direction);
+ dma_resv_unlock(attach->dmabuf->resv);
}
-EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
*
* @dmabuf: [in] buffer which is moving
*
- * Informs all attachmenst that they need to destroy and recreated all their
+ * Informs all attachments that they need to destroy and recreate all their
* mappings.
*/
void dma_buf_move_notify(struct dma_buf *dmabuf)
@@ -961,16 +1264,16 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
/**
* DOC: cpu access
*
- * There are mutliple reasons for supporting CPU access to a dma buffer object:
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
*
* - Fallback operations in the kernel, for example when a device is connected
* over USB and the kernel needs to shuffle the data around first before
- * sending it away. Cache coherency is handled by braketing any transactions
+ * sending it away. Cache coherency is handled by bracketing any transactions
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
@@ -978,16 +1281,18 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* vmap interface is introduced. Note that on very old 32-bit architectures
* vmalloc space might be limited and result in vmap calls failing.
*
- * Interfaces::
- * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
- * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ * Interfaces:
+ *
+ * .. code-block:: c
+ *
+ * void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
- * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- * that the dma-buf layer keeps a reference count for all vmap access and
- * calls down into the exporter's vmap function only when no vmapping exists,
- * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- * provided by taking the dma_buf->lock mutex.
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by the reservation lock in &dma_buf.resv.
*
* - For full compatibility on the importer side with existing userspace
* interfaces, which might already support mmap'ing buffers. This is needed in
@@ -997,7 +1302,7 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* replace ION buffers mmap support was needed.
*
* There are no special interfaces; userspace simply calls mmap on the dma-buf
- * fd. But like for CPU access there's a need to braket the actual access,
+ * fd. But like for CPU access there's a need to bracket the actual access,
* which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
* DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
* be restarted.
@@ -1038,12 +1343,14 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* enough, since adding interfaces to intercept pagefaults and allow pte
* shootdowns would increase the complexity quite a bit.
*
- * Interface::
- * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
- * unsigned long);
+ * Interface:
+ *
+ * .. code-block:: c
+ *
+ * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
- * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
* equally achieve that for a dma-buf object.
*/
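
Because DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, the userspace bracketing calls need a restart loop; a minimal sketch:

	#include <errno.h>
	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>

	static int dmabuf_sync(int fd, __u64 flags)
	{
		struct dma_buf_sync sync = { .flags = flags };
		int ret;

		do {
			ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
		} while (ret < 0 && (errno == EAGAIN || errno == EINTR));

		return ret < 0 ? -errno : 0;
	}

	/* bracket CPU reads of the mmap'ed buffer */
	dmabuf_sync(fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ);
	/* ... access the mapping ... */
	dmabuf_sync(fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ);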
@@ -1056,8 +1363,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout_rcu(resv, write, true,
- MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+ true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -1070,12 +1377,17 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* After the cpu access is complete the caller should call
- * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -1086,6 +1398,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf))
return -EINVAL;
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
@@ -1098,7 +1412,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1106,7 +1420,7 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* This terminates CPU access started with dma_buf_begin_cpu_access().
*
@@ -1119,12 +1433,14 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
WARN_ON(!dmabuf);
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->end_cpu_access)
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
/**
@@ -1144,9 +1460,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
- struct file *oldfile;
- int ret;
-
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -1164,160 +1477,169 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- get_file(dmabuf->file);
- oldfile = vma->vm_file;
- vma->vm_file = dmabuf->file;
+ vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
- ret = dmabuf->ops->mmap(dmabuf, vma);
- if (ret) {
- /* restore old parameters on failure */
- vma->vm_file = oldfile;
- fput(dmabuf->file);
- } else {
- if (oldfile)
- fput(oldfile);
- }
- return ret;
-
+ return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_GPL(dma_buf_mmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
* address space. Same restrictions as for vmap and friends apply.
* @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
*
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
*
- * Returns NULL on error.
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
*/
-void *dma_buf_vmap(struct dma_buf *dmabuf)
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
- void *ptr;
+ struct iosys_map ptr;
+ int ret;
+
+ iosys_map_clear(map);
if (WARN_ON(!dmabuf))
- return NULL;
+ return -EINVAL;
+
+ dma_resv_assert_held(dmabuf->resv);
if (!dmabuf->ops->vmap)
- return NULL;
+ return -EINVAL;
- mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
- BUG_ON(!dmabuf->vmap_ptr);
- ptr = dmabuf->vmap_ptr;
- goto out_unlock;
+ BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
+ *map = dmabuf->vmap_ptr;
+ return 0;
}
- BUG_ON(dmabuf->vmap_ptr);
+ BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
- ptr = dmabuf->ops->vmap(dmabuf);
- if (WARN_ON_ONCE(IS_ERR(ptr)))
- ptr = NULL;
- if (!ptr)
- goto out_unlock;
+ ret = dmabuf->ops->vmap(dmabuf, &ptr);
+ if (WARN_ON_ONCE(ret))
+ return ret;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
-out_unlock:
- mutex_unlock(&dmabuf->lock);
- return ptr;
+ *map = dmabuf->vmap_ptr;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");
+
+/**
+ * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
+ *
+ * Unlocked version of dma_buf_vmap()
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ int ret;
+
+ iosys_map_clear(map);
+
+ if (WARN_ON(!dmabuf))
+ return -EINVAL;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ ret = dma_buf_vmap(dmabuf, map);
+ dma_resv_unlock(dmabuf->resv);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_vmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
- * @vaddr: [in] vmap to vunmap
+ * @map: [in] vmap pointer to vunmap
*/
-void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
if (WARN_ON(!dmabuf))
return;
- BUG_ON(!dmabuf->vmap_ptr);
+ dma_resv_assert_held(dmabuf->resv);
+
+ BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
- BUG_ON(dmabuf->vmap_ptr != vaddr);
+ BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
- mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
- dmabuf->vmap_ptr = NULL;
+ dmabuf->ops->vunmap(dmabuf, map);
+ iosys_map_clear(&dmabuf->vmap_ptr);
}
- mutex_unlock(&dmabuf->lock);
}
-EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");
+
+/**
+ * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf: [in] buffer to vunmap
+ * @map: [in] vmap pointer to vunmap
+ */
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ if (WARN_ON(!dmabuf))
+ return;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ dma_buf_vunmap(dmabuf, map);
+ dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
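
A minimal in-kernel sketch tying the unlocked vmap pair to the CPU-access bracketing required above (the dmabuf_peek() helper is hypothetical):

	static int dmabuf_peek(struct dma_buf *dmabuf, void *dst, size_t len)
	{
		struct iosys_map map;
		int ret;

		ret = dma_buf_vmap_unlocked(dmabuf, &map);
		if (ret)
			return ret;

		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
		if (!ret) {
			/* handles both I/O and system-memory mappings */
			iosys_map_memcpy_from(dst, &map, 0, len);
			dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
		}

		dma_buf_vunmap_unlocked(dmabuf, &map);
		return ret;
	}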
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
- int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- struct dma_resv *robj;
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
- unsigned seq;
- int count = 0, attach_count, shared_count, i;
+ int count = 0, attach_count;
size_t size = 0;
+ int ret;
- ret = mutex_lock_interruptible(&db_list.lock);
+ ret = mutex_lock_interruptible(&dmabuf_list_mutex);
if (ret)
return ret;
seq_puts(s, "\nDma-buf Objects:\n");
- seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
+ seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &db_list.head, list_node) {
+ list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
goto error_unlock;
+
+ spin_lock(&buf_obj->name_lock);
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
file_count(buf_obj->file),
buf_obj->exp_name,
file_inode(buf_obj->file)->i_ino,
- buf_obj->name ?: "");
-
- robj = buf_obj->resv;
- while (true) {
- seq = read_seqcount_begin(&robj->seq);
- rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
- shared_count = fobj ? fobj->shared_count : 0;
- fence = rcu_dereference(robj->fence_excl);
- if (!read_seqcount_retry(&robj->seq, seq))
- break;
- rcu_read_unlock();
- }
+ buf_obj->name ?: "<none>");
+ spin_unlock(&buf_obj->name_lock);
- if (fence)
- seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- dma_fence_is_signaled(fence) ? "" : "un");
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- if (!dma_fence_get_rcu(fence))
- continue;
- seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- dma_fence_is_signaled(fence) ? "" : "un");
- dma_fence_put(fence);
- }
- rcu_read_unlock();
+ dma_resv_describe(buf_obj->resv, s);
seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
@@ -1337,11 +1659,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&db_list.lock);
+ mutex_unlock(&dmabuf_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&db_list.lock);
+ mutex_unlock(&dmabuf_list_mutex);
return ret;
}
@@ -1360,7 +1682,7 @@ static int dma_buf_init_debugfs(void)
dma_buf_debugfs_dir = d;
- d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
NULL, &dma_buf_debug_fops);
if (IS_ERR(d)) {
pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
@@ -1388,12 +1710,16 @@ static inline void dma_buf_uninit_debugfs(void)
static int __init dma_buf_init(void)
{
+ int ret;
+
+ ret = dma_buf_init_sysfs_statistics();
+ if (ret)
+ return ret;
+
dma_buf_mnt = kern_mount(&dma_buf_fs_type);
if (IS_ERR(dma_buf_mnt))
return PTR_ERR(dma_buf_mnt);
- mutex_init(&db_list.lock);
- INIT_LIST_HEAD(&db_list.head);
dma_buf_init_debugfs();
return 0;
}
@@ -1403,5 +1729,6 @@ static void __exit dma_buf_deinit(void)
{
dma_buf_uninit_debugfs();
kern_unmount(dma_buf_mnt);
+ dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);