author    Thomas Zimmermann <tzimmermann@suse.de>    2021-11-18 09:36:39 +0100
committer Thomas Zimmermann <tzimmermann@suse.de>    2021-11-18 09:36:39 +0100
commit    a713ca234ea9d946235ac7248995c5fddfd9e523 (patch)
tree      708f72ee1c76360aa80c926f1defc8301aef1a23 /drivers/dma-buf
parent    37fe0cf5fb803d98efd7feb64b408c9b029c1085 (diff)
parent    fa55b7dcdc43c1aa1ba12bca9d2dd4318c2a0dbf (diff)
Merge drm/drm-next into drm-misc-next
Backmerging from drm/drm-next for v5.16-rc1.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--   drivers/dma-buf/dma-buf.c   55
1 file changed, 32 insertions(+), 23 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 385cd037325e..602b12d7470d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -67,12 +67,9 @@ static void dma_buf_release(struct dentry *dentry)
BUG_ON(dmabuf->vmapping_counter);
/*
- * Any fences that a dma-buf poll can wait on should be signaled
- * before releasing dma-buf. This is the responsibility of each
- * driver that uses the reservation objects.
- *
- * If you hit this BUG() it means someone dropped their ref to the
- * dma-buf while still having pending operation to the buffer.
+ * If you hit this BUG() it could mean:
+ * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+ * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
*/
BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
@@ -200,6 +197,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+ struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
unsigned long flags;
spin_lock_irqsave(&dcb->poll->lock, flags);
@@ -207,6 +205,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
dma_fence_put(fence);
+ /* Paired with get_file in dma_buf_poll */
+ fput(dmabuf->file);
}
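
dma_buf_poll_cb() only receives a pointer to the embedded poll structure, so the new container_of() line recovers the owning struct dma_buf in order to drop the file reference at the end of the callback. A small standalone illustration of that container_of() step (simplified macro and made-up struct names, not the kernel definitions):

/*
 * Userspace illustration of the container_of() step above: recover the
 * enclosing structure from a pointer to one of its members.  offsetof()
 * from <stddef.h> does the pointer arithmetic; all names are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct poll_head { int dummy; };

struct fake_dma_buf {
	int id;
	struct poll_head poll;   /* embedded member, like dmabuf->poll */
};

int main(void)
{
	struct fake_dma_buf buf = { .id = 42 };
	struct poll_head *p = &buf.poll;   /* what the callback is handed */
	struct fake_dma_buf *owner = container_of(p, struct fake_dma_buf, poll);

	printf("recovered id = %d\n", owner->id);   /* prints 42 */
	return 0;
}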
static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
@@ -259,6 +259,9 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & EPOLLOUT) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
if (!dma_buf_poll_add_cb(resv, true, dcb))
/* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
@@ -279,6 +282,9 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & EPOLLIN) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
if (!dma_buf_poll_add_cb(resv, false, dcb))
/* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
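
The two get_file() calls above pair with the fput() added to dma_buf_poll_cb(): the poll path now holds a file reference for as long as a fence callback may still be pending, so the dma-buf cannot be released underneath the callback. A minimal userspace model of that pairing, using a plain atomic refcount in place of the kernel's struct file (all names here are illustrative, not kernel API):

/*
 * Minimal userspace model of the get_file()/fput() pairing: take a
 * reference before arming the callback, drop it when the callback has run.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct buf {
	atomic_int refcount;             /* stands in for the struct file refcount */
};

static void buf_put(struct buf *b)       /* stands in for fput() */
{
	if (atomic_fetch_sub(&b->refcount, 1) == 1) {
		printf("buffer released\n");
		free(b);
	}
}

static void poll_cb(struct buf *b)       /* stands in for dma_buf_poll_cb() */
{
	/* ... wake up any waiters ... */
	buf_put(b);                      /* paired with the get in poll_arm() */
}

static void poll_arm(struct buf *b)      /* stands in for the EPOLLIN/EPOLLOUT branches */
{
	atomic_fetch_add(&b->refcount, 1);   /* paired with the put in poll_cb() */
	/* no fence to wait for in this model, so complete immediately */
	poll_cb(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	atomic_init(&b->refcount, 1);    /* the caller's own reference */
	poll_arm(b);
	buf_put(b);                      /* caller drops its reference last */
	return 0;
}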
@@ -564,7 +570,7 @@ err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_export);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
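
With this export moved into the DMA_BUF symbol namespace (as are the remaining exports below), a module that calls these functions must import the namespace, otherwise it fails to load with a missing-namespace error from modpost. A hedged sketch of the importer side; only the MODULE_IMPORT_NS(DMA_BUF) line is the required piece, the rest of the module skeleton is illustrative:

/*
 * Hedged sketch of a module that uses the dma-buf exports after the
 * namespace conversion.  Module name and callbacks are made up.
 */
#include <linux/module.h>
#include <linux/dma-buf.h>

MODULE_IMPORT_NS(DMA_BUF);

static int __init dmabuf_user_init(void)
{
	/* dma_buf_get(), dma_buf_attach(), ... may be called from here on */
	return 0;
}

static void __exit dmabuf_user_exit(void)
{
}

module_init(dmabuf_user_init);
module_exit(dmabuf_user_exit);
MODULE_LICENSE("GPL");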
/**
* dma_buf_fd - returns a file descriptor for the given struct dma_buf
@@ -588,7 +594,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
return fd;
}
-EXPORT_SYMBOL_GPL(dma_buf_fd);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
/**
* dma_buf_get - returns the struct dma_buf related to an fd
@@ -614,7 +620,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_GPL(dma_buf_get);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
/**
* dma_buf_put - decreases refcount of the buffer
@@ -633,7 +639,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_GPL(dma_buf_put);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
static void mangle_sg_table(struct sg_table *sg_table)
{
@@ -764,7 +770,7 @@ err_unlock:
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -779,7 +785,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
{
return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
-EXPORT_SYMBOL_GPL(dma_buf_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
static void __unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
@@ -825,7 +831,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
kfree(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_detach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
/**
* dma_buf_pin - Lock down the DMA-buf
@@ -855,7 +861,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_pin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
/**
* dma_buf_unpin - Unpin a DMA-buf
@@ -876,7 +882,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
if (dmabuf->ops->unpin)
dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_unpin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -966,7 +972,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
-EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -1002,7 +1008,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1022,7 +1028,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
/**
* DOC: cpu access
@@ -1166,7 +1172,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1194,7 +1200,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
/**
@@ -1236,7 +1242,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_GPL(dma_buf_mmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
@@ -1290,7 +1296,7 @@ out_unlock:
mutex_unlock(&dmabuf->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_vmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
@@ -1314,7 +1320,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
}
mutex_unlock(&dmabuf->lock);
}
-EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
@@ -1340,6 +1346,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
if (ret)
goto error_unlock;
+
+ spin_lock(&buf_obj->name_lock);
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
@@ -1347,6 +1355,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
buf_obj->exp_name,
file_inode(buf_obj->file)->i_ino,
buf_obj->name ?: "");
+ spin_unlock(&buf_obj->name_lock);
dma_resv_describe(buf_obj->resv, s);
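
The name_lock taken around the seq_printf() pairs with the writer side in the DMA_BUF_SET_NAME ioctl path, which replaces buf_obj->name under the same spinlock, so the debugfs reader never observes a half-freed string. Roughly, and simplified rather than quoted verbatim from the kernel source, the writer looks like this (assuming the usual strndup_user() helper and DMA_BUF_NAME_LEN limit):

/*
 * Hedged sketch of the writer this lock pairs with; simplified, not the
 * verbatim kernel function.
 */
static long example_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);            /* free the previous name, if any */
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}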