Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/Kconfig                  |    3
-rw-r--r--  drivers/media/v4l2-core/v4l2-common.c            |    3
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c    |   19
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c               |    1
-rw-r--r--  drivers/media/v4l2-core/v4l2-event.c             |    2
-rw-r--r--  drivers/media/v4l2-core/v4l2-fh.c                |    2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c             |   11
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c           |   19
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c            |   22
-rw-r--r--  drivers/media/v4l2-core/videobuf-core.c          |    4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c         |  300
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c   |  700
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c       |   40
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c      |   56
14 files changed, 1065 insertions(+), 117 deletions(-)
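The series adds DMABUF support to videobuf2: MMAP buffers can be exported as DMABUF file descriptors via the new VIDIOC_EXPBUF ioctl, and existing DMABUF fds can be imported via the new V4L2_MEMORY_DMABUF memory type. A minimal userspace sketch of the export side follows (illustrative only, not part of the patch; the capture buffer type and helper name are assumptions):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export plane 0 of an MMAP buffer as a DMABUF fd; returns the fd or -1. */
static int export_buffer(int video_fd, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* assumed capture queue */
	expbuf.index = index;		/* buffer allocated via VIDIOC_REQBUFS */
	expbuf.plane = 0;		/* plane 0 for single-planar formats */
	expbuf.flags = O_CLOEXEC;	/* the only flag vb2_expbuf() accepts */

	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	return expbuf.fd;	/* DMABUF fd, ready to pass to another device */
}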
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 0c54e19d9944..65875c3aba1b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -59,6 +59,7 @@ config VIDEOBUF_DVB
# Used by drivers that need Videobuf2 modules
config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
tristate
config VIDEOBUF2_MEMOPS
@@ -68,11 +69,13 @@ config VIDEOBUF2_DMA_CONTIG
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_VMALLOC
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_DMA_SG
tristate
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index f995dd31151d..380ddd89fa4c 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -837,7 +837,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
struct v4l2_dv_timings *fmt)
{
int pix_clk;
- int v_fp, v_bp, h_fp, h_bp, hsync;
+ int v_fp, v_bp, h_fp, hsync;
int frame_width, image_height, image_width;
bool default_gtf;
int h_blank;
@@ -885,7 +885,6 @@ bool v4l2_detect_gtf(unsigned frame_height,
hsync = hsync - hsync % GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
- h_bp = h_blank / 2;
fmt->bt.polarities = polarities;
fmt->bt.width = image_width;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 83ffb6436baf..7157af301b14 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -297,6 +297,7 @@ struct v4l2_plane32 {
union {
__u32 mem_offset;
compat_long_t userptr;
+ __s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
@@ -318,6 +319,7 @@ struct v4l2_buffer32 {
__u32 offset;
compat_long_t userptr;
compat_caddr_t planes;
+ __s32 fd;
} m;
__u32 length;
__u32 reserved2;
@@ -341,6 +343,9 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
up_pln = compat_ptr(p);
if (put_user((unsigned long)up_pln, &up->m.userptr))
return -EFAULT;
+ } else if (memory == V4L2_MEMORY_DMABUF) {
+ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+ return -EFAULT;
} else {
if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
sizeof(__u32)))
@@ -364,6 +369,11 @@ static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
sizeof(__u32)))
return -EFAULT;
+ /* For DMABUF, the driver may have set up the fd, so copy it back. */
+ if (memory == V4L2_MEMORY_DMABUF)
+ if (copy_in_user(&up32->m.fd, &up->m.fd,
+ sizeof(int)))
+ return -EFAULT;
return 0;
}
@@ -446,6 +456,10 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (get_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -510,6 +524,10 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (put_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -1000,6 +1018,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_S_FBUF32:
case VIDIOC_OVERLAY32:
case VIDIOC_QBUF32:
+ case VIDIOC_EXPBUF:
case VIDIOC_DQBUF32:
case VIDIOC_STREAMON32:
case VIDIOC_STREAMOFF32:
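The new fd member is 32 bits on both ABIs, but it shares a union with members that change width between the compat and native layouts, which is why each member is copied explicitly above. For reference, a compilable sketch of the native 'm' union (abridged from the v4l2 uapi header; the wrapper struct name is ours):

#include <linux/videodev2.h>

/* Mirror of the native union in struct v4l2_buffer: userptr and planes
 * widen on 64-bit kernels, so get/put_v4l2_buffer32() translate member
 * by member; fd stays __s32 everywhere.
 */
struct m_union_sketch {
	union {
		__u32			offset;
		unsigned long		userptr;
		struct v4l2_plane	*planes;
		__s32			fd;
	} m;
};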
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index a2df842e5100..98dcad9c8a3b 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -571,6 +571,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
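With the ioctl marked valid here, a vb2-based driver enables export by pointing vidioc_expbuf at the generic helper added later in this series; a hedged sketch with a hypothetical ops table:

/* Hypothetical driver ioctl ops; the helpers are exported by videobuf2-core. */
static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,	/* added by this series */
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};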
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 18a040b935a3..c72009218152 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 9e3fc040ea20..e57c002b4150 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8f388ff31ebb..aa6e7c788db2 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -155,6 +155,7 @@ static const char *v4l2_memory_names[] = {
[V4L2_MEMORY_MMAP] = "mmap",
[V4L2_MEMORY_USERPTR] = "userptr",
[V4L2_MEMORY_OVERLAY] = "overlay",
+ [V4L2_MEMORY_DMABUF] = "dmabuf",
};
#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
@@ -453,6 +454,15 @@ static void v4l_print_buffer(const void *arg, bool write_only)
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_exportbuffer *p = arg;
+
+ pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+ p->fd, prt_names(p->type, v4l2_type_names),
+ p->index, p->plane, p->flags);
+}
+
static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
@@ -1960,6 +1970,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 3ac83583ad7a..438ea45d1074 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -369,6 +369,19 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+ return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+/**
* v4l2_m2m_streamon() - turn on streaming for a video queue
*/
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -510,12 +523,10 @@ struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops)
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
+ WARN_ON(!m2m_ops->job_abort))
return ERR_PTR(-EINVAL);
- BUG_ON(!m2m_ops->device_run);
- BUG_ON(!m2m_ops->job_abort);
-
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
if (!m2m_dev)
return ERR_PTR(-ENOMEM);
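A mem2mem driver forwards its vidioc_expbuf handler to the new helper, which picks the source or destination queue from eb->type; a minimal sketch, assuming a hypothetical per-file context carrying the usual m2m_ctx pointer:

#include <media/v4l2-mem2mem.h>

struct mydrv_ctx {			/* hypothetical driver context */
	struct v4l2_m2m_ctx *m2m_ctx;
};

static int mydrv_vidioc_expbuf(struct file *file, void *priv,
			       struct v4l2_exportbuffer *eb)
{
	struct mydrv_ctx *ctx = priv;

	return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}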
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index dced41c1d993..996c248dea42 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -412,20 +412,20 @@ static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
struct v4l2_subdev_format *fmt)
{
- switch (media_entity_type(pad->entity)) {
- case MEDIA_ENT_T_V4L2_SUBDEV:
+ if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
- return v4l2_subdev_call(media_entity_to_v4l2_subdev(
- pad->entity),
- pad, get_fmt, NULL, fmt);
- default:
- WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
- media_entity_type(pad->entity), pad->entity->name);
- /* Fall through */
- case MEDIA_ENT_T_DEVNODE_V4L:
- return -EINVAL;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
}
+
+ WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
+ "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+ pad->entity->type, pad->entity->name);
+
+ return -EINVAL;
}
int v4l2_subdev_link_validate(struct media_link *link)
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index bf7a326b1cdc..5449e8aa984a 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -335,6 +335,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case V4L2_MEMORY_OVERLAY:
b->m.offset = vb->boff;
break;
+ case V4L2_MEMORY_DMABUF:
+ /* DMABUF is not handled in the videobuf framework */
+ break;
}
b->flags = 0;
@@ -405,6 +408,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
break;
case V4L2_MEMORY_USERPTR:
case V4L2_MEMORY_OVERLAY:
+ case V4L2_MEMORY_DMABUF:
/* nothing */
break;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 432df119af27..9f81be23a81f 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -109,6 +109,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
}
/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_memop(q, unmap_dmabuf, p->mem_priv);
+
+ call_memop(q, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -230,6 +260,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
/* Free MMAP buffers or release USERPTR buffers */
if (q->memory == V4L2_MEMORY_MMAP)
__vb2_buf_mem_free(vb);
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
else
__vb2_buf_userptr_put(vb);
}
@@ -362,6 +394,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
b->m.offset = vb->v4l2_planes[0].m.mem_offset;
else if (q->memory == V4L2_MEMORY_USERPTR)
b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ b->m.fd = vb->v4l2_planes[0].m.fd;
}
/*
@@ -454,13 +488,28 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
* __verify_memory_type() - Check whether the memory type and buffer type
* passed to a buffer operation are compatible with the queue.
*/
static int __verify_memory_type(struct vb2_queue *q,
enum v4l2_memory memory, enum v4l2_buf_type type)
{
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
+ memory != V4L2_MEMORY_DMABUF) {
dprintk(1, "reqbufs: unsupported memory type\n");
return -EINVAL;
}
@@ -484,6 +533,11 @@ static int __verify_memory_type(struct vb2_queue *q,
return -EINVAL;
}
+ if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
/*
* Place the busy tests at the end: -EBUSY can be ignored when
* create_bufs is called with count == 0, but count == 0 should still
@@ -790,6 +844,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
+ unsigned int plane;
if (vb->state != VB2_BUF_STATE_ACTIVE)
return;
@@ -800,6 +855,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
dprintk(4, "Done processing on buffer %d, state: %d\n",
vb->v4l2_buf.index, vb->state);
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, finish, vb->planes[plane].mem_priv);
+
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
@@ -845,6 +904,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
b->m.planes[plane].length;
}
}
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ v4l2_planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
+ }
+ }
} else {
/*
* Single-planar buffers do not use planes array,
@@ -859,6 +928,13 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
v4l2_planes[0].m.userptr = b->m.userptr;
v4l2_planes[0].length = b->length;
}
+
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ v4l2_planes[0].m.fd = b->m.fd;
+ v4l2_planes[0].length = b->length;
+ v4l2_planes[0].data_offset = 0;
+ }
+
}
vb->v4l2_buf.field = b->field;
@@ -959,14 +1035,121 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ /* Verify and copy relevant information provided by userspace */
+ __fill_vb2_buffer(vb, b, planes);
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* use DMABUF size if length is not provided */
+ if (planes[plane].length == 0)
+ planes[plane].length = dbuf->size;
+
+ if (planes[plane].length < planes[plane].data_offset +
+ q->plane_sizes[plane]) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf &&
+ vb->v4l2_planes[plane].length == planes[plane].length) {
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+ /* Acquire each plane's memory */
+ mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ dbuf, planes[plane].length, write);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "qbuf: failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ dma_buf_put(dbuf);
+ goto err;
+ }
+
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
+ * really we want to do this just before the DMA, not while queueing
+ * the buffer(s).
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
+ plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ goto err;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
+
+/**
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->queued_count);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, prepare, vb->planes[plane].mem_priv);
+
q->ops->buf_queue(vb);
}
@@ -982,6 +1165,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
case V4L2_MEMORY_USERPTR:
ret = __qbuf_userptr(vb, b);
break;
+ case V4L2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, b);
+ break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
@@ -1303,6 +1489,30 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
/**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int i;
+
+ /* nothing to do if the buffer is already dequeued */
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ return;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+
+ /* unmap DMABUF buffer */
+ if (q->memory == V4L2_MEMORY_DMABUF)
+ for (i = 0; i < vb->num_planes; ++i) {
+ if (!vb->planes[i].dbuf_mapped)
+ continue;
+ call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ vb->planes[i].dbuf_mapped = 0;
+ }
+}
+
+/**
* vb2_dqbuf() - Dequeue a buffer to the userspace
* @q: videobuf2 queue
* @b: buffer structure passed from userspace to vidioc_dqbuf handler
@@ -1363,11 +1573,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
__fill_v4l2_buffer(vb, b);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
+ /* go back to dequeued state */
+ __vb2_dqbuf(vb);
dprintk(1, "dqbuf of buffer %d, with state %d\n",
vb->v4l2_buf.index, vb->state);
- vb->state = VB2_BUF_STATE_DEQUEUED;
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
@@ -1406,7 +1617,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* Reinitialize all buffers for next use.
*/
for (i = 0; i < q->num_buffers; ++i)
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+ __vb2_dqbuf(q->bufs[i]);
}
/**
@@ -1540,6 +1751,79 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
}
/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to vidioc_expbuf
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ struct vb2_buffer *vb = NULL;
+ struct vb2_plane *vb_plane;
+ int ret;
+ struct dma_buf *dbuf;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "Queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (!q->mem_ops->get_dmabuf) {
+ dprintk(1, "Queue does not support DMA buffer exporting\n");
+ return -EINVAL;
+ }
+
+ if (eb->flags & ~O_CLOEXEC) {
+ dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ return -EINVAL;
+ }
+
+ if (eb->type != q->type) {
+ dprintk(1, "qbuf: invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (eb->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[eb->index];
+
+ if (eb->plane >= vb->num_planes) {
+ dprintk(1, "buffer plane out of range\n");
+ return -EINVAL;
+ }
+
+ vb_plane = &vb->planes[eb->plane];
+
+ dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "Failed to export buffer %d, plane %d\n",
+ eb->index, eb->plane);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_fd(dbuf, eb->flags);
+ if (ret < 0) {
+ dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+ eb->index, eb->plane, ret);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+ dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
+ eb->index, eb->plane, ret);
+ eb->fd = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+/**
* vb2_mmap() - map video buffers into application address space
* @q: videobuf2 queue
* @vma: vma passed to the mmap file operation handler in the driver
@@ -2245,6 +2529,16 @@ int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
/* v4l2_file_operations helpers */
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
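On the import side, userspace requests buffers with the new V4L2_MEMORY_DMABUF type and passes the fd at QBUF time; __qbuf_dmabuf() above then attaches and maps each plane. A hedged single-planar sketch (queue type, buffer count, and fd origin are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Queue a DMABUF fd, e.g. one exported by another device's VIDIOC_EXPBUF. */
static int queue_dmabuf(int video_fd, unsigned int index, int dmabuf_fd)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;

	memset(&req, 0, sizeof(req));
	req.count = 4;				/* arbitrary example count */
	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;	/* assumed output queue */
	req.memory = V4L2_MEMORY_DMABUF;
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = index;
	buf.m.fd = dmabuf_fd;
	/* buf.length left 0: vb2 falls back to the dmabuf's own size */

	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}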
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4b7132660a93..10beaee7f0ae 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -10,7 +10,10 @@
* the Free Software Foundation.
*/
+#include <linux/dma-buf.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -23,40 +26,158 @@ struct vb2_dc_conf {
};
struct vb2_dc_buf {
- struct vb2_dc_conf *conf;
+ struct device *dev;
void *vaddr;
- dma_addr_t dma_addr;
unsigned long size;
- struct vm_area_struct *vma;
- atomic_t refcount;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ struct sg_table *dma_sgt;
+
+ /* MMAP related */
struct vb2_vmarea_handler handler;
+ atomic_t refcount;
+ struct sg_table *sgt_base;
+
+ /* USERPTR related */
+ struct vm_area_struct *vma;
+
+ /* DMABUF related */
+ struct dma_buf_attachment *db_attach;
};
-static void vb2_dma_contig_put(void *buf_priv);
+/*********************************************/
+/* scatterlist table functions */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+ void (*cb)(struct page *pg))
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ struct page *page = sg_page(s);
+ unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+ >> PAGE_SHIFT;
+ unsigned int j;
+
+ for (j = 0; j < n_pages; ++j, ++page)
+ cb(page);
+ }
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
+
+/*********************************************/
+/* callbacks for all buffers */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/* callbacks for MMAP buffers */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!atomic_dec_and_test(&buf->refcount))
+ return;
+
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ put_device(buf->dev);
+ kfree(buf);
+}
-static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
struct vb2_dc_conf *conf = alloc_ctx;
+ struct device *dev = conf->dev;
struct vb2_dc_buf *buf;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
- GFP_KERNEL);
+ /* align image size to PAGE_SIZE */
+ size = PAGE_ALIGN(size);
+
+ buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
if (!buf->vaddr) {
- dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
- size);
+ dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- buf->conf = conf;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
buf->size = size;
buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dma_contig_put;
+ buf->handler.put = vb2_dc_put;
buf->handler.arg = buf;
atomic_inc(&buf->refcount);
@@ -64,100 +185,569 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
return buf;
}
-static void vb2_dma_contig_put(void *buf_priv)
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct vb2_dc_buf *buf = buf_priv;
+ int ret;
- if (atomic_dec_and_test(&buf->refcount)) {
- dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
- buf->dma_addr);
- kfree(buf);
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ /*
+ * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+ * map the whole buffer
+ */
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+ buf->dma_addr, buf->size);
+
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
}
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
+
+ return 0;
}
-static void *vb2_dma_contig_cookie(void *buf_priv)
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
+ struct vb2_dc_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dc_buf *buf = dbuf->priv;
+ int ret;
- return &buf->dma_addr;
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->sgt_base scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->sgt_base->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
}
-static void *vb2_dma_contig_vaddr(void *buf_priv)
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
- if (!buf)
- return NULL;
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ /* stealing the dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dir == dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ attach->dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dir = dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dc_get_dmabuf */
+ vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
return buf->vaddr;
}
-static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
{
- struct vb2_dc_buf *buf = buf_priv;
+ return vb2_dc_mmap(dbuf->priv, vma);
+}
- return atomic_read(&buf->refcount);
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+ .attach = vb2_dc_dmabuf_ops_attach,
+ .detach = vb2_dc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+ .kmap = vb2_dc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+ .vmap = vb2_dc_dmabuf_ops_vmap,
+ .mmap = vb2_dc_dmabuf_ops_mmap,
+ .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ dev_err(buf->dev, "failed to alloc sg table\n");
+ return NULL;
+ }
+
+ ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+ buf->size);
+ if (ret < 0) {
+ dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ return NULL;
+ }
+
+ return sgt;
}
-static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
- if (!buf) {
- printk(KERN_ERR "No buffer to map\n");
- return -EINVAL;
+ if (!buf->sgt_base)
+ buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+ if (WARN_ON(!buf->sgt_base))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for USERPTR buffers */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+ int n_pages, struct vm_area_struct *vma, int write)
+{
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ pages[i] = pfn_to_page(pfn);
+ }
+ } else {
+ int n;
+
+ n = get_user_pages(current, current->mm, start & PAGE_MASK,
+ n_pages, write, 1, pages, NULL);
+ /* negative error means that no page was pinned */
+ n = max(n, 0);
+ if (n != n_pages) {
+ pr_err("got only %d of %d user pages\n", n, n_pages);
+ while (n)
+ put_page(pages[--n]);
+ return -EFAULT;
+ }
}
- return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
- &vb2_common_vm_ops, &buf->handler);
+ return 0;
}
-static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+static void vb2_dc_put_dirty_page(struct page *page)
{
+ set_page_dirty_lock(page);
+ put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+ sg_free_table(sgt);
+ kfree(sgt);
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
+ unsigned long start;
+ unsigned long end;
+ unsigned long offset;
+ struct page **pages;
+ int n_pages;
+ int ret = 0;
struct vm_area_struct *vma;
- dma_addr_t dma_addr = 0;
- int ret;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+ unsigned long dma_align = dma_get_cache_alignment();
+
+ /* Only cache-aligned DMA transfers are reliable */
+ if (!IS_ALIGNED(vaddr | size, dma_align)) {
+ pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!size) {
+ pr_debug("size is zero\n");
+ return ERR_PTR(-EINVAL);
+ }
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+ buf->dev = conf->dev;
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ start = vaddr & PAGE_MASK;
+ offset = vaddr & ~PAGE_MASK;
+ end = PAGE_ALIGN(vaddr + size);
+ n_pages = (end - start) >> PAGE_SHIFT;
+
+ pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ pr_err("failed to allocate pages table\n");
+ goto fail_buf;
+ }
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ pr_err("no vma for address %lu\n", vaddr);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ pr_err("failed to copy vma\n");
+ ret = -ENOMEM;
+ goto fail_pages;
+ }
+
+ /* extract page list from userspace mapping */
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {
- printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
- vaddr);
- kfree(buf);
- return ERR_PTR(ret);
+ pr_err("failed to get user pages\n");
+ goto fail_vma;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ pr_err("failed to allocate sg table\n");
+ ret = -ENOMEM;
+ goto fail_get_user_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, GFP_KERNEL);
+ if (ret) {
+ pr_err("failed to initialize sg table\n");
+ goto fail_sgt;
}
+ /* pages are no longer needed */
+ kfree(pages);
+ pages = NULL;
+
+ sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
+ if (sgt->nents <= 0) {
+ pr_err("failed to map scatterlist\n");
+ ret = -EIO;
+ goto fail_sgt_init;
+ }
+
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < size) {
+ pr_err("contiguous mapping is too small %lu/%lu\n",
+ contig_size, size);
+ ret = -EFAULT;
+ goto fail_map_sg;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
buf->size = size;
- buf->dma_addr = dma_addr;
- buf->vma = vma;
+ buf->dma_sgt = sgt;
return buf;
+
+fail_map_sg:
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+
+fail_sgt_init:
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, put_page);
+ sg_free_table(sgt);
+
+fail_sgt:
+ kfree(sgt);
+
+fail_get_user_pages:
+ if (pages && !vma_is_io(buf->vma))
+ while (n_pages)
+ put_page(pages[--n_pages]);
+
+fail_vma:
+ vb2_put_vma(buf->vma);
+
+fail_pages:
+ kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+ kfree(buf);
+
+ return ERR_PTR(ret);
}
-static void vb2_dma_contig_put_userptr(void *mem_priv)
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
{
struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt;
+ unsigned long contig_size;
- if (!buf)
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR_OR_NULL(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ /* check that the dmabuf is big enough to store a contiguous chunk */
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < buf->size) {
+ pr_err("contiguous chunk is too small %lu/%lu b\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ return -EFAULT;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+
+ return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
return;
+ }
- vb2_put_vma(buf->vma);
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_addr = 0;
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_addr))
+ vb2_dc_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
kfree(buf);
}
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+/*********************************************/
+/* DMA CONTIG exported functions */
+/*********************************************/
+
const struct vb2_mem_ops vb2_dma_contig_memops = {
- .alloc = vb2_dma_contig_alloc,
- .put = vb2_dma_contig_put,
- .cookie = vb2_dma_contig_cookie,
- .vaddr = vb2_dma_contig_vaddr,
- .mmap = vb2_dma_contig_mmap,
- .get_userptr = vb2_dma_contig_get_userptr,
- .put_userptr = vb2_dma_contig_put_userptr,
- .num_users = vb2_dma_contig_num_users,
+ .alloc = vb2_dc_alloc,
+ .put = vb2_dc_put,
+ .get_dmabuf = vb2_dc_get_dmabuf,
+ .cookie = vb2_dc_cookie,
+ .vaddr = vb2_dc_vaddr,
+ .mmap = vb2_dc_mmap,
+ .get_userptr = vb2_dc_get_userptr,
+ .put_userptr = vb2_dc_put_userptr,
+ .prepare = vb2_dc_prepare,
+ .finish = vb2_dc_finish,
+ .map_dmabuf = vb2_dc_map_dmabuf,
+ .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+ .attach_dmabuf = vb2_dc_attach_dmabuf,
+ .detach_dmabuf = vb2_dc_detach_dmabuf,
+ .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
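The importer callbacks above reduce to the standard dma-buf attach/map sequence; a linearized sketch of what vb2_dc_attach_dmabuf() and vb2_dc_map_dmabuf() together perform for one plane ('dev' and 'fd' are placeholders for illustration):

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int import_one_plane(struct device *dev, int fd)
{
	struct dma_buf *dbuf;
	struct dma_buf_attachment *att;
	struct sg_table *sgt;
	dma_addr_t dma_addr;

	dbuf = dma_buf_get(fd);			/* take an fd reference */
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);

	att = dma_buf_attach(dbuf, dev);	/* cf. vb2_dc_attach_dmabuf() */
	if (IS_ERR(att)) {
		dma_buf_put(dbuf);
		return PTR_ERR(att);
	}

	sgt = dma_buf_map_attachment(att, DMA_FROM_DEVICE); /* cf. vb2_dc_map_dmabuf() */
	if (IS_ERR_OR_NULL(sgt)) {
		dma_buf_detach(dbuf, att);
		dma_buf_put(dbuf);
		return -EINVAL;
	}

	dma_addr = sg_dma_address(sgt->sgl);	/* start of the contiguous chunk */
	dev_info(dev, "dmabuf mapped at %08lx\n", (unsigned long)dma_addr);
	/* ... program the hardware and run the DMA here ... */

	dma_buf_unmap_attachment(att, sgt, DMA_FROM_DEVICE);
	dma_buf_detach(dbuf, att);
	dma_buf_put(dbuf);
	return 0;
}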
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 051ea3571b20..81c1ad8b2cf1 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -137,46 +137,6 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
/**
- * vb2_mmap_pfn_range() - map physical pages to userspace
- * @vma: virtual memory region for the mapping
- * @paddr: starting physical address of the memory to be mapped
- * @size: size of the memory to be mapped
- * @vm_ops: vm operations to be assigned to the created area
- * @priv: private data to be associated with the area
- *
- * Returns 0 on success.
- */
-int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
- unsigned long size,
- const struct vm_operations_struct *vm_ops,
- void *priv)
-{
- int ret;
-
- size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (ret) {
- printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
- return ret;
- }
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = priv;
- vma->vm_ops = vm_ops;
-
- vma->vm_ops->open(vma);
-
- pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, paddr, vma->vm_start, size);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
-
-/**
* vb2_common_vm_open() - increase refcount of the vma
* @vma: virtual memory region for the mapping
*
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 94efa04d8d55..a47fd4f589a1 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -30,6 +30,7 @@ struct vb2_vmalloc_buf {
unsigned int n_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct dma_buf *dbuf;
};
static void vb2_vmalloc_put(void *buf_priv);
@@ -207,11 +208,66 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
}
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+ return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ if (buf->vaddr)
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dbuf = dbuf;
+ buf->write = write;
+ buf->size = size;
+
+ return buf;
+}
+
+
const struct vb2_mem_ops vb2_vmalloc_memops = {
.alloc = vb2_vmalloc_alloc,
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+ .map_dmabuf = vb2_vmalloc_map_dmabuf,
+ .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
+ .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
+ .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
.vaddr = vb2_vmalloc_vaddr,
.mmap = vb2_vmalloc_mmap,
.num_users = vb2_vmalloc_num_users,