Diffstat (limited to 'drivers/media/common/videobuf2/videobuf2-dma-contig.c')
-rw-r--r-- drivers/media/common/videobuf2/videobuf2-dma-contig.c | 386
1 file changed, 233 insertions(+), 153 deletions(-)
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index aff0ab7bf83d..7123c5fae92c 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
@@ -40,6 +41,9 @@ struct vb2_dc_buf {
/* DMABUF related */
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
+ bool non_coherent_mem;
};
/*********************************************/
@@ -53,10 +57,10 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
unsigned int i;
unsigned long size = 0;
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, s, i) {
if (sg_dma_address(s) != expected)
break;
- expected = sg_dma_address(s) + sg_dma_len(s);
+ expected += sg_dma_len(s);
size += sg_dma_len(s);
}
return size;
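
For illustration, a minimal standalone sketch of the same scan, with hypothetical segment addresses standing in for the sg_dma_address()/sg_dma_len() results: the first two segments are adjacent, the third is not, so only the 0x3000-byte prefix counts as contiguous.

#include <stdio.h>

struct seg { unsigned long dma_addr; unsigned long len; };

int main(void)
{
	/* hypothetical DMA layout: two adjacent segments, then a gap */
	struct seg segs[] = {
		{ 0x80001000, 0x1000 },
		{ 0x80002000, 0x2000 },
		{ 0x80010000, 0x1000 },
	};
	unsigned long expected = segs[0].dma_addr;
	unsigned long size = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (segs[i].dma_addr != expected)
			break;
		expected += segs[i].len;
		size += segs[i].len;
	}
	printf("contiguous prefix: %#lx bytes\n", size);	/* 0x3000 */
	return 0;
}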
@@ -66,20 +70,46 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
/* callbacks for all buffers */
/*********************************************/
-static void *vb2_dc_cookie(void *buf_priv)
+static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
return &buf->dma_addr;
}
-static void *vb2_dc_vaddr(void *buf_priv)
+/*
+ * This function may fail if:
+ *
+ * - dma_buf_vmap() fails
+ * E.g. due to lack of virtual mapping address space, or due to
+ * dmabuf->ops misconfiguration.
+ *
+ * - dma_vmap_noncontiguous() fails
+ * For instance, when the requested buffer size is larger than totalram_pages().
+ * Relevant for buffers that use non-coherent memory.
+ *
+ * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
+ * Relevant for buffers that use coherent memory.
+ */
+static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- if (!buf->vaddr && buf->db_attach)
- buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+ if (buf->vaddr)
+ return buf->vaddr;
+ if (buf->db_attach) {
+ struct iosys_map map;
+
+ if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
+ buf->vaddr = map.vaddr;
+
+ return buf->vaddr;
+ }
+
+ if (buf->non_coherent_mem)
+ buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt);
return buf->vaddr;
}
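
As a usage note, a minimal in-kernel sketch of how this memop is normally reached (fill_test_pattern() is a hypothetical helper): vb2_plane_vaddr() dispatches to vb2_dc_vaddr() and may return NULL for any of the failure modes listed above.

#include <linux/string.h>
#include <media/videobuf2-core.h>

static void fill_test_pattern(struct vb2_buffer *vb)
{
	void *vaddr = vb2_plane_vaddr(vb, 0);

	/* NULL e.g. with DMA_ATTR_NO_KERNEL_MAPPING or a failed vmap */
	if (!vaddr)
		return;

	memset(vaddr, 0x80, vb2_plane_size(vb, 0));
}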
@@ -95,12 +125,19 @@ static void vb2_dc_prepare(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- /* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_prepare)
+ return;
+
+ if (!buf->non_coherent_mem)
return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ flush_kernel_vmap_range(buf->vaddr, buf->size);
+
+ /* For both USERPTR and non-coherent MMAP */
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
@@ -108,11 +145,19 @@ static void vb2_dc_finish(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- /* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
+ if (!buf->non_coherent_mem)
return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ invalidate_kernel_vmap_range(buf->vaddr, buf->size);
+
+ /* For both USERPTR and non-coherent MMAP */
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
/*********************************************/
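
The non_coherent_mem paths in prepare()/finish() above are exercised when userspace requests non-coherent buffers. A hedged userspace sketch, assuming an already-open capture fd and a driver that sets allow_cache_hints on the queue (request_non_coherent() is an illustrative name):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_non_coherent(int fd)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	/* honoured only when the driver sets q->allow_cache_hints */
	req.flags = V4L2_MEMORY_FLAG_NON_COHERENT;

	return ioctl(fd, VIDIOC_REQBUFS, &req);
}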
@@ -126,21 +171,69 @@ static void vb2_dc_put(void *buf_priv)
if (!refcount_dec_and_test(&buf->refcount))
return;
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
+ if (buf->non_coherent_mem) {
+ if (buf->vaddr)
+ dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
+ dma_free_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt, buf->dma_dir);
+ } else {
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_attrs(buf->dev, buf->size, buf->cookie,
+ buf->dma_addr, buf->attrs);
}
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->cookie = dma_alloc_attrs(buf->dev,
+ buf->size,
+ &buf->dma_addr,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->cookie)
+ return -ENOMEM;
+
+ if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+ return 0;
+
+ buf->vaddr = buf->cookie;
+ return 0;
+}
+
+static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
+ buf->size,
+ buf->dma_dir,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->dma_sgt)
+ return -ENOMEM;
+
+ buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);
+
+ /*
+ * For non-coherent buffers the kernel mapping is created on demand
+ * in vb2_dc_vaddr().
+ */
+ return 0;
+}
+
+static void *vb2_dc_alloc(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size)
{
struct vb2_dc_buf *buf;
+ int ret;
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
@@ -149,23 +242,26 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
if (!buf)
return ERR_PTR(-ENOMEM);
- if (attrs)
- buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
+ buf->attrs = vb->vb2_queue->dma_attrs;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
+ buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;
+ buf->size = size;
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
- buf->size = size;
- buf->dma_dir = dma_dir;
+
+ if (buf->non_coherent_mem)
+ ret = vb2_dc_alloc_non_coherent(buf);
+ else
+ ret = vb2_dc_alloc_coherent(buf);
+
+ if (ret) {
+ dev_err(dev, "dma alloc of size %lu failed\n", size);
+ put_device(buf->dev);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dc_put;
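
For context, a sketch of the queue setup that selects between these two allocators; example_queue_init() is hypothetical, and the driver's vb2_ops, buffer sizing and locking are elided. With DMA_ATTR_NO_KERNEL_MAPPING set, vb2_dc_alloc_coherent() above leaves buf->vaddr NULL:

#include <linux/dma-mapping.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-v4l2.h>

static int example_queue_init(struct vb2_queue *q, struct device *dev)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->mem_ops = &vb2_dma_contig_memops;
	q->dev = dev;
	/* coherent buffers are then allocated without a kernel mapping */
	q->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
	/* allow userspace to pass V4L2_MEMORY_FLAG_NON_COHERENT */
	q->allow_cache_hints = 1;
	/* q->ops, q->buf_struct_size, locking etc. omitted in this sketch */

	return vb2_queue_init(q);
}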
@@ -186,29 +282,26 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- /*
- * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
- * map whole buffer
- */
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
-
+ if (buf->non_coherent_mem)
+ ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
+ buf->dma_sgt);
+ else
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
+ buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
return ret;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = &buf->handler;
vma->vm_ops = &vb2_common_vm_ops;
vma->vm_ops->open(vma);
- pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, (unsigned long)buf->dma_addr, vma->vm_start,
- buf->size);
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
return 0;
}
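
On the userspace side, the mmap() that ends up in vb2_dc_mmap() uses the per-buffer offset reported by VIDIOC_QUERYBUF. A minimal sketch (map_buffer() is an illustrative helper):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_buffer(int fd, unsigned int index, size_t *len)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;

	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return MAP_FAILED;

	*len = buf.length;
	/* buf.m.offset is the vb2 cookie that is routed to vb2_dc_mmap() */
	return mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, buf.m.offset);
}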
@@ -273,8 +366,14 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
/* release the scatterlist cache */
if (attach->dma_dir != DMA_NONE)
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ /*
+ * Cache sync can be skipped here, as the vb2_dc memory is
+ * allocated from device coherent memory, which means the
+ * memory locations do not require any explicit cache
+ * maintenance prior to or after being used by the device.
+ */
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(attach);
db_attach->priv = NULL;
@@ -284,39 +383,32 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
struct vb2_dc_attachment *attach = db_attach->priv;
- /* stealing dmabuf mutex to serialize map/unmap operations */
- struct mutex *lock = &db_attach->dmabuf->lock;
struct sg_table *sgt;
- mutex_lock(lock);
-
sgt = &attach->sgt;
/* return previously mapped sg table */
- if (attach->dma_dir == dma_dir) {
- mutex_unlock(lock);
+ if (attach->dma_dir == dma_dir)
return sgt;
- }
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
attach->dma_dir = DMA_NONE;
}
- /* mapping to the client with new direction */
- sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- dma_dir);
- if (!sgt->nents) {
+ /*
+ * mapping to the client with the new direction; no cache sync
+ * required, see the comment in vb2_dc_dmabuf_ops_detach()
+ */
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
pr_err("failed to map scatterlist\n");
- mutex_unlock(lock);
return ERR_PTR(-EIO);
}
attach->dma_dir = dma_dir;
- mutex_unlock(lock);
-
return sgt;
}
@@ -332,18 +424,33 @@ static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
vb2_dc_put(dbuf->priv);
}
-static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+static int
+vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
{
- struct vb2_dc_buf *buf = dbuf->priv;
+ return 0;
+}
- return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+static int
+vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ return 0;
}
-static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
- struct vb2_dc_buf *buf = dbuf->priv;
+ struct vb2_dc_buf *buf;
+ void *vaddr;
- return buf->vaddr;
+ buf = dbuf->priv;
+ vaddr = vb2_dc_vaddr(buf->vb, buf);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -357,7 +464,8 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
.detach = vb2_dc_dmabuf_ops_detach,
.map_dma_buf = vb2_dc_dmabuf_ops_map,
.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
- .map = vb2_dc_dmabuf_ops_kmap,
+ .begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
+ .end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
.vmap = vb2_dc_dmabuf_ops_vmap,
.mmap = vb2_dc_dmabuf_ops_mmap,
.release = vb2_dc_dmabuf_ops_release,
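
These dma_buf_ops come into play once a buffer is exported; from userspace that is done with VIDIOC_EXPBUF, sketched below (export_buffer() is an illustrative helper):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int export_buffer(int fd, unsigned int index)
{
	struct v4l2_exportbuffer exp;

	memset(&exp, 0, sizeof(exp));
	exp.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	exp.index = index;
	exp.flags = O_CLOEXEC;

	if (ioctl(fd, VIDIOC_EXPBUF, &exp) < 0)
		return -1;

	return exp.fd;	/* a dma-buf fd backed by vb2_dc_dmabuf_ops */
}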
@@ -368,6 +476,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
int ret;
struct sg_table *sgt;
+ if (buf->non_coherent_mem)
+ return buf->dma_sgt;
+
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
dev_err(buf->dev, "failed to alloc sg table\n");
@@ -385,7 +496,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -428,55 +541,28 @@ static void vb2_dc_put_userptr(void *buf_priv)
* No need to sync to CPU, it's already synced to the CPU
* since the finish() memop will have been called before this.
*/
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- pages = frame_vector_pages(buf->vec);
- /* sgt should exist only if vector contains pages... */
- BUG_ON(IS_ERR(pages));
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (buf->dma_dir == DMA_FROM_DEVICE ||
- buf->dma_dir == DMA_BIDIRECTIONAL)
- for (i = 0; i < frame_vector_count(buf->vec); i++)
- set_page_dirty_lock(pages[i]);
+ buf->dma_dir == DMA_BIDIRECTIONAL) {
+ pages = frame_vector_pages(buf->vec);
+ /* sgt should exist only if vector contains pages... */
+ if (!WARN_ON_ONCE(IS_ERR(pages)))
+ for (i = 0; i < frame_vector_count(buf->vec); i++)
+ set_page_dirty_lock(pages[i]);
+ }
sg_free_table(sgt);
kfree(sgt);
+ } else {
+ dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
+ buf->dma_dir, 0);
}
vb2_destroy_framevec(buf->vec);
kfree(buf);
}
-/*
- * For some kind of reserved memory there might be no struct page available,
- * so all that can be done to support such 'pages' is to try to convert
- * pfn to dma address or at the last resort just assume that
- * dma address == physical address (like it has been assumed in earlier version
- * of videobuf2-dma-contig
- */
-
-#ifdef __arch_pfn_to_dma
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
-}
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__pfn_to_bus(pfn);
-}
-#elif defined(__pfn_to_phys)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__pfn_to_phys(pfn);
-}
-#else
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- /* really, we cannot do anything better at this point */
- return (dma_addr_t)(pfn) << PAGE_SHIFT;
-}
-#endif
-
-static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dc_buf *buf;
struct frame_vector *vec;
@@ -506,11 +592,12 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
offset = lower_32_bits(offset_in_page(vaddr));
- vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
- dma_dir == DMA_BIDIRECTIONAL);
+ vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
+ buf->dma_dir == DMA_BIDIRECTIONAL);
if (IS_ERR(vec)) {
ret = PTR_ERR(vec);
goto fail_buf;
@@ -528,7 +615,12 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
for (i = 1; i < n_pages; i++)
if (nums[i-1] + 1 != nums[i])
goto fail_pfnvec;
- buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+ buf->dma_addr = dma_map_resource(buf->dev,
+ __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
+ if (dma_mapping_error(buf->dev, buf->dma_addr)) {
+ ret = -ENOMEM;
+ goto fail_pfnvec;
+ }
goto out;
}
@@ -550,9 +642,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (sgt->nents <= 0) {
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
pr_err("failed to map scatterlist\n");
ret = -EIO;
goto fail_sgt_init;
@@ -568,14 +659,15 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dma_addr = sg_dma_address(sgt->sgl);
buf->dma_sgt = sgt;
+ buf->non_coherent_mem = 1;
+
out:
buf->size = size;
return buf;
fail_map_sg:
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
fail_sgt_init:
sg_free_table(sgt);
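
The USERPTR path above is driven by queueing user memory. A userspace sketch (queue_userptr() is illustrative; the buffers must have been requested with V4L2_MEMORY_USERPTR, and without an IOMMU the pages must form a single physically contiguous run, as the pfn check above enforces):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_userptr(int fd, unsigned int index, void *mem, size_t len)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_USERPTR;
	buf.index = index;
	buf.m.userptr = (unsigned long)mem;
	buf.length = len;

	return ioctl(fd, VIDIOC_QBUF, &buf);
}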
@@ -613,7 +705,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
}
/* get the associated scatterlist for this buffer */
- sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
@@ -622,9 +714,10 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
/* checking if dmabuf is big enough to store contiguous chunk */
contig_size = vb2_dc_get_contiguous_size(sgt);
if (contig_size < buf->size) {
- pr_err("contiguous chunk is too small %lu/%lu b\n",
- contig_size, buf->size);
- dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ pr_err("contiguous chunk is too small %lu/%lu\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
+ buf->dma_dir);
return -EFAULT;
}
@@ -639,6 +732,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
struct vb2_dc_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n");
@@ -651,10 +745,10 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
}
if (buf->vaddr) {
- dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
- dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);
buf->dma_addr = 0;
buf->dma_sgt = NULL;
@@ -673,8 +767,8 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dc_buf *buf;
struct dma_buf_attachment *dba;
@@ -690,6 +784,8 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
+ buf->vb = vb;
+
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
if (IS_ERR(dba)) {
@@ -698,7 +794,7 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
@@ -755,32 +851,16 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
if (!dev->dma_parms) {
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
+ dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
+ return -ENODEV;
}
if (dma_get_max_seg_size(dev) < size)
- return dma_set_max_seg_size(dev, size);
-
+ dma_set_max_seg_size(dev, size);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
-/*
- * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
- * @dev: device for configuring DMA parameters
- *
- * This function releases resources allocated to configure DMA parameters
- * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
- * device drivers on driver remove.
- */
-void vb2_dma_contig_clear_max_seg_size(struct device *dev)
-{
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
-
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("DMA_BUF");
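
Finally, since the kzalloc() fallback is gone, vb2_dma_contig_set_max_seg_size() now relies on dma_parms being pre-allocated by the bus (the driver core does this for platform and PCI devices). Typical probe-time usage, sketched for a hypothetical platform driver:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <media/videobuf2-dma-contig.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* let vb2 coalesce an entire buffer into a single DMA segment */
	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... device setup and vb2 queue initialization ... */
	return 0;
}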