Diffstat (limited to 'drivers/iio/buffer')
-rw-r--r-- | drivers/iio/buffer/industrialio-buffer-cb.c        |   2
-rw-r--r-- | drivers/iio/buffer/industrialio-buffer-dma.c       | 300
-rw-r--r-- | drivers/iio/buffer/industrialio-buffer-dmaengine.c | 246
-rw-r--r-- | drivers/iio/buffer/industrialio-hw-consumer.c      |   4
-rw-r--r-- | drivers/iio/buffer/kfifo_buf.c                     |   1
5 files changed, 448 insertions, 105 deletions
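
The diff below teaches the IIO DMA buffer core about DMABUF attachments and output (write) buffers, and reworks the DMAengine buffer setup API around an explicit buffer direction. For orientation before the hunks, here is a minimal, hypothetical probe-time sketch of the direction-aware devm helper this diff introduces; everything except the IIO calls themselves (the device names, the function name) is a placeholder, not part of the patch:

	#include <linux/device.h>
	#include <linux/iio/iio.h>
	#include <linux/iio/buffer-dmaengine.h>

	/*
	 * Hypothetical capture-driver fragment: request the "rx" DMA channel
	 * and attach a devm-managed DMAengine buffer in the IN
	 * (device-to-memory) direction, using the signature added below.
	 */
	static int example_adc_setup_buffer(struct device *dev,
					    struct iio_dev *indio_dev)
	{
		return devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
						IIO_BUFFER_DIRECTION_IN);
	}

Teardown is automatic via the devm action the helper registers.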
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4c12b7a94af5..4befc9f55201 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -77,7 +77,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
 	}
 
 	cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
-	cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength,
+	cb_buff->buffer.scan_mask = bitmap_zalloc(iio_get_masklength(cb_buff->indio_dev),
						  GFP_KERNEL);
 	if (cb_buff->buffer.scan_mask == NULL) {
 		ret = -ENOMEM;
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 5610ba67925e..ee294a775e8a 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -4,6 +4,8 @@
  * Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
+#include <linux/atomic.h>
+#include <linux/cleanup.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -14,6 +16,8 @@
 #include <linux/poll.h>
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
 
@@ -94,13 +98,18 @@ static void iio_buffer_block_release(struct kref *kref)
 {
 	struct iio_dma_buffer_block *block = container_of(kref,
 		struct iio_dma_buffer_block, kref);
+	struct iio_dma_buffer_queue *queue = block->queue;
 
-	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
 
-	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
-					block->vaddr, block->phys_addr);
+	if (block->fileio) {
+		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
+				  block->vaddr, block->phys_addr);
+	} else {
+		atomic_dec(&queue->num_dmabufs);
+	}
 
-	iio_buffer_put(&block->queue->buffer);
+	iio_buffer_put(&queue->buffer);
 	kfree(block);
 }
 
@@ -163,7 +172,7 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
 }
 
 static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
-	struct iio_dma_buffer_queue *queue, size_t size)
+	struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
 {
 	struct iio_dma_buffer_block *block;
 
@@ -171,13 +180,16 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 	if (!block)
 		return NULL;
 
-	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
-		&block->phys_addr, GFP_KERNEL);
-	if (!block->vaddr) {
-		kfree(block);
-		return NULL;
+	if (fileio) {
+		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+						  &block->phys_addr, GFP_KERNEL);
+		if (!block->vaddr) {
+			kfree(block);
+			return NULL;
+		}
 	}
 
+	block->fileio = fileio;
 	block->size = size;
 	block->state = IIO_BLOCK_STATE_DONE;
 	block->queue = queue;
@@ -186,6 +198,9 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 	iio_buffer_get(&queue->buffer);
 
+	if (!fileio)
+		atomic_inc(&queue->num_dmabufs);
+
 	return block;
 }
 
@@ -195,6 +210,18 @@ static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 	block->state = IIO_BLOCK_STATE_DONE;
 }
 
+static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
+{
+	__poll_t flags;
+
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+		flags = EPOLLIN | EPOLLRDNORM;
+	else
+		flags = EPOLLOUT | EPOLLWRNORM;
+
+	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
+}
+
 /**
  * iio_dma_buffer_block_done() - Indicate that a block has been completed
  * @block: The completed block
@@ -206,15 +233,22 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 {
 	struct iio_dma_buffer_queue *queue = block->queue;
 	unsigned long flags;
+	bool cookie;
+
+	cookie = dma_fence_begin_signalling();
 
 	spin_lock_irqsave(&queue->list_lock, flags);
 	_iio_dma_buffer_block_done(block);
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
+	if (!block->fileio)
+		iio_buffer_signal_dmabuf_done(block->fence, 0);
+
 	iio_buffer_block_put_atomic(block);
-	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+	iio_dma_buffer_queue_wake(queue);
+	dma_fence_end_signalling(cookie);
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_block_list_abort() - Indicate that a list block has been
@@ -231,19 +265,29 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 {
 	struct iio_dma_buffer_block *block, *_block;
 	unsigned long flags;
+	bool cookie;
+
+	cookie = dma_fence_begin_signalling();
 
 	spin_lock_irqsave(&queue->list_lock, flags);
 	list_for_each_entry_safe(block, _block, list, head) {
 		list_del(&block->head);
 		block->bytes_used = 0;
 		_iio_dma_buffer_block_done(block);
+
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
 		iio_buffer_block_put_atomic(block);
 	}
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
-	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+	if (queue->fileio.enabled)
+		queue->fileio.enabled = false;
+
+	iio_dma_buffer_queue_wake(queue);
+	dma_fence_end_signalling(cookie);
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_list_abort, "IIO_DMA_BUFFER");
 
 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 {
@@ -261,6 +305,16 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 	}
 }
 
+static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
+{
+	/*
+	 * Note that queue->num_dmabufs cannot increase while the queue is
+	 * locked, it can only decrease, so it does not race against
+	 * iio_dma_buffer_alloc_block().
+	 */
+	return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);
+}
+
 /**
  * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer which to request an update
@@ -287,6 +341,12 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 
 	mutex_lock(&queue->lock);
 
+	queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);
+
+	/* If DMABUFs were created, disable fileio interface */
+	if (!queue->fileio.enabled)
+		goto out_unlock;
+
 	/* Allocations are page aligned */
 	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
 		try_reuse = true;
@@ -327,7 +387,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 		}
 
 		if (!block) {
-			block = iio_dma_buffer_alloc_block(queue, size);
+			block = iio_dma_buffer_alloc_block(queue, size, true);
 			if (!block) {
 				ret = -ENOMEM;
 				goto out_unlock;
@@ -335,8 +395,24 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 			queue->fileio.blocks[i] = block;
 		}
 
-		block->state = IIO_BLOCK_STATE_QUEUED;
-		list_add_tail(&block->head, &queue->incoming);
+		/*
+		 * block->bytes_used may have been modified previously, e.g. by
+		 * iio_dma_buffer_block_list_abort(). Reset it here to the
+		 * block's so that iio_dma_buffer_io() will work.
+		 */
+		block->bytes_used = block->size;
+
+		/*
+		 * If it's an input buffer, mark the block as queued, and
+		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
+		 * done, which means it's ready to be dequeued.
+		 */
+		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+			block->state = IIO_BLOCK_STATE_QUEUED;
+			list_add_tail(&block->head, &queue->incoming);
+		} else {
+			block->state = IIO_BLOCK_STATE_DONE;
+		}
 	}
 
 out_unlock:
@@ -344,7 +420,7 @@ out_unlock:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, "IIO_DMA_BUFFER");
 
 static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
 {
@@ -384,8 +460,12 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 
 	block->state = IIO_BLOCK_STATE_ACTIVE;
 	iio_buffer_block_get(block);
+
 	ret = queue->ops->submit(queue, block);
 	if (ret) {
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->fence, ret);
+
 		/*
 		 * This is a bit of a problem and there is not much we can do
 		 * other then wait for the buffer to be disabled and re-enabled
@@ -426,7 +506,7 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_disable() - Disable DMA buffer
@@ -450,7 +530,7 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, "IIO_DMA_BUFFER");
 
 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
 	struct iio_dma_buffer_block *block)
@@ -488,20 +568,12 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
 	return block;
 }
 
-/**
- * iio_dma_buffer_read() - DMA buffer read callback
- * @buffer: Buffer to read form
- * @n: Number of bytes to read
- * @user_buffer: Userspace buffer to copy the data to
- *
- * Should be used as the read callback for iio_buffer_access_ops
- * struct for DMA buffers.
- */
-int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
-	char __user *user_buffer)
+static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
+			     char __user *user_buffer, bool is_from_user)
 {
 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 	struct iio_dma_buffer_block *block;
+	void *addr;
 	int ret;
 
 	if (n < buffer->bytes_per_datum)
@@ -524,8 +596,13 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
 	n = rounddown(n, buffer->bytes_per_datum);
 	if (n > block->bytes_used - queue->fileio.pos)
 		n = block->bytes_used - queue->fileio.pos;
+	addr = block->vaddr + queue->fileio.pos;
 
-	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
+	if (is_from_user)
+		ret = copy_from_user(addr, user_buffer, n);
+	else
+		ret = copy_to_user(user_buffer, addr, n);
+	if (ret) {
 		ret = -EFAULT;
 		goto out_unlock;
 	}
@@ -544,16 +621,49 @@ out_unlock:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
 
 /**
- * iio_dma_buffer_data_available() - DMA buffer data_available callback
- * @buf: Buffer to check for data availability
+ * iio_dma_buffer_read() - DMA buffer read callback
+ * @buffer: Buffer to read from
+ * @n: Number of bytes to read
+ * @user_buffer: Userspace buffer to copy the data to
 *
- * Should be used as the data_available callback for iio_buffer_access_ops
+ * Should be used as the read callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
-size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
+int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
+			char __user *user_buffer)
+{
+	return iio_dma_buffer_io(buffer, n, user_buffer, false);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, "IIO_DMA_BUFFER");
+
+/**
+ * iio_dma_buffer_write() - DMA buffer write callback
+ * @buffer: Buffer to write to
+ * @n: Number of bytes to read
+ * @user_buffer: Userspace buffer to copy the data from
+ *
+ * Should be used as the write callback for iio_buffer_access_ops
+ * struct for DMA buffers.
+ */
+int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
+			 const char __user *user_buffer)
+{
+	return iio_dma_buffer_io(buffer, n,
+				 (__force __user char *)user_buffer, true);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_write, "IIO_DMA_BUFFER");
+
+/**
+ * iio_dma_buffer_usage() - DMA buffer data_available and
+ *	space_available callback
+ * @buf: Buffer to check for data availability
+ *
+ * Should be used as the data_available and space_available callbacks for
+ * iio_buffer_access_ops struct for DMA buffers.
+ */
+size_t iio_dma_buffer_usage(struct iio_buffer *buf)
 {
 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
 	struct iio_dma_buffer_block *block;
@@ -586,7 +696,111 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 
 	return data_available;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, "IIO_DMA_BUFFER");
+
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+			     struct dma_buf_attachment *attach)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block;
+
+	guard(mutex)(&queue->lock);
+
+	/*
+	 * If the buffer is enabled and in fileio mode new blocks can't be
+	 * allocated.
+	 */
+	if (queue->fileio.enabled)
+		return ERR_PTR(-EBUSY);
+
+	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+
+	/* Free memory that might be in use for fileio mode */
+	iio_dma_buffer_fileio_free(queue);
+
+	return block;
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_attach_dmabuf, "IIO_DMA_BUFFER");
+
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block)
+{
+	block->state = IIO_BLOCK_STATE_DEAD;
+	iio_buffer_block_put_atomic(block);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_detach_dmabuf, "IIO_DMA_BUFFER");
+
+static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
+{
+	struct iio_dma_buffer_queue *queue = block->queue;
+
+	/* If in fileio mode buffers can't be enqueued. */
+	if (queue->fileio.enabled)
+		return -EBUSY;
+
+	switch (block->state) {
+	case IIO_BLOCK_STATE_QUEUED:
+		return -EPERM;
+	case IIO_BLOCK_STATE_ACTIVE:
+	case IIO_BLOCK_STATE_DEAD:
+		return -EBUSY;
+	case IIO_BLOCK_STATE_DONE:
+		break;
+	}
+
+	return 0;
+}
+
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block,
+				  struct dma_fence *fence,
+				  struct sg_table *sgt,
+				  size_t size, bool cyclic)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	bool cookie;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&queue->lock));
+
+	cookie = dma_fence_begin_signalling();
+
+	ret = iio_dma_can_enqueue_block(block);
+	if (ret < 0)
+		goto out_end_signalling;
+
+	block->bytes_used = size;
+	block->cyclic = cyclic;
+	block->sg_table = sgt;
+	block->fence = fence;
+
+	iio_dma_buffer_enqueue(queue, block);
+
+out_end_signalling:
+	dma_fence_end_signalling(cookie);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, "IIO_DMA_BUFFER");
+
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_lock(&queue->lock);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_lock_queue, "IIO_DMA_BUFFER");
+
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_unlock_queue, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
@@ -602,7 +816,7 @@ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_bytes_per_datum, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_set_length - DMA buffer set_length callback
@@ -622,7 +836,7 @@ int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_length, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_init() - Initialize DMA buffer queue
@@ -650,7 +864,7 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_init, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_exit() - Cleanup DMA buffer queue
@@ -668,7 +882,7 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
 
 	mutex_unlock(&queue->lock);
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, "IIO_DMA_BUFFER");
 
 /**
  * iio_dma_buffer_release() - Release final buffer resources
@@ -682,7 +896,7 @@ void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
 {
 	mutex_destroy(&queue->lock);
 }
-EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_release, "IIO_DMA_BUFFER");
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index a18c1da292af..e9d9a7d39fe1 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -64,15 +64,63 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 	struct dma_async_tx_descriptor *desc;
+	enum dma_transfer_direction dma_dir;
+	struct scatterlist *sgl;
+	struct dma_vec *vecs;
+	size_t max_size;
 	dma_cookie_t cookie;
+	size_t len_total;
+	unsigned int i;
+	int nents;
 
-	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
-	block->bytes_used = round_down(block->bytes_used,
-			dmaengine_buffer->align);
+	max_size = min(block->size, dmaengine_buffer->max_size);
+	max_size = round_down(max_size, dmaengine_buffer->align);
 
-	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
-		DMA_PREP_INTERRUPT);
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+		dma_dir = DMA_DEV_TO_MEM;
+	else
+		dma_dir = DMA_MEM_TO_DEV;
+
+	if (block->sg_table) {
+		sgl = block->sg_table->sgl;
+		nents = sg_nents_for_len(sgl, block->bytes_used);
+		if (nents < 0)
+			return nents;
+
+		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
+		if (!vecs)
+			return -ENOMEM;
+
+		len_total = block->bytes_used;
+
+		for (i = 0; i < nents; i++) {
+			vecs[i].addr = sg_dma_address(sgl);
+			vecs[i].len = min(sg_dma_len(sgl), len_total);
+			len_total -= vecs[i].len;
+
+			sgl = sg_next(sgl);
+		}
+
+		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
+							 vecs, nents, dma_dir,
+							 DMA_PREP_INTERRUPT);
+		kfree(vecs);
+	} else {
+		max_size = min(block->size, dmaengine_buffer->max_size);
+		max_size = round_down(max_size, dmaengine_buffer->align);
+
+		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+			block->bytes_used = max_size;
+
+		if (!block->bytes_used || block->bytes_used > max_size)
+			return -EINVAL;
+
+		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+						   block->phys_addr,
+						   block->bytes_used,
+						   dma_dir,
+						   DMA_PREP_INTERRUPT);
+	}
 	if (!desc)
 		return -ENOMEM;
@@ -112,14 +160,23 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
 
 static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
 	.read = iio_dma_buffer_read,
+	.write = iio_dma_buffer_write,
 	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 	.set_length = iio_dma_buffer_set_length,
 	.request_update = iio_dma_buffer_request_update,
 	.enable = iio_dma_buffer_enable,
 	.disable = iio_dma_buffer_disable,
-	.data_available = iio_dma_buffer_data_available,
+	.data_available = iio_dma_buffer_usage,
+	.space_available = iio_dma_buffer_usage,
 	.release = iio_dmaengine_buffer_release,
 
+	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
+	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
+	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
+
+	.lock_queue = iio_dma_buffer_lock_queue,
+	.unlock_queue = iio_dma_buffer_unlock_queue,
+
 	.modes = INDIO_BUFFER_HARDWARE,
 	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 };
@@ -149,39 +206,29 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
 
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: Parent device for the buffer
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
 *
 * Once done using the buffer iio_dmaengine_buffer_free() should be used to
 * release it.
 */
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
 	unsigned int width, src_width, dest_width;
 	struct dma_slave_caps caps;
-	struct dma_chan *chan;
 	int ret;
 
+	ret = dma_get_slave_caps(chan, &caps);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
 	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
-	chan = dma_request_chan(dev, channel);
-	if (IS_ERR(chan)) {
-		ret = PTR_ERR(chan);
-		goto err_free;
-	}
-
-	ret = dma_get_slave_caps(chan, &caps);
-	if (ret < 0)
-		goto err_free;
-
 	/* Needs to be aligned to the maximum of the minimums */
 	if (caps.src_addr_widths)
 		src_width = __ffs(caps.src_addr_widths);
@@ -205,12 +252,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
 	return &dmaengine_buffer->queue.buffer;
-
-err_free:
-	kfree(dmaengine_buffer);
-	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
 
 /**
  * iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -218,79 +260,165 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(buffer);
 
 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
-	dma_release_channel(dmaengine_buffer->chan);
-
 	iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
 {
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buffer);
+	struct dma_chan *chan = dmaengine_buffer->chan;
+
 	iio_dmaengine_buffer_free(buffer);
+	dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+				  struct dma_chan *chan,
+				  enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+	int ret;
+
+	buffer = iio_dmaengine_buffer_alloc(chan);
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+	buffer->direction = dir;
+
+	ret = iio_device_attach_buffer(indio_dev, buffer);
+	if (ret) {
+		iio_dmaengine_buffer_free(buffer);
+		return ERR_PTR(ret);
+	}
+
+	return buffer;
 }
 
 /**
- * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
- * @dev: Parent device for the buffer
+ * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
+ * @dev: DMA channel consumer device
+ * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
 *
- * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
 *
- * The buffer will be automatically de-allocated once the device gets destroyed.
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
+ * release it.
 */
-static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir)
 {
+	struct dma_chan *chan;
 	struct iio_buffer *buffer;
-	int ret;
 
-	buffer = iio_dmaengine_buffer_alloc(dev, channel);
-	if (IS_ERR(buffer))
-		return buffer;
+	chan = dma_request_chan(dev, channel);
+	if (IS_ERR(chan))
+		return ERR_CAST(chan);
 
-	ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
-				       buffer);
-	if (ret)
-		return ERR_PTR(ret);
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+	if (IS_ERR(buffer))
+		dma_release_channel(chan);
 
 	return buffer;
 }
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
+
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
+{
+	iio_dmaengine_buffer_teardown(buffer);
+}
 
 /**
- * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
- * @dev: Parent device for the buffer
+ * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
+ * @dev: Device for devm ownership and DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-				    struct iio_dev *indio_dev,
-				    const char *channel)
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+					struct iio_dev *indio_dev,
+					const char *channel,
+					enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 
-	buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
+	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
-	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
+					buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
+
+static void devm_iio_dmaengine_buffer_free(void *buffer)
+{
+	iio_dmaengine_buffer_free(buffer);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an
+ *	IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
+ * caller manages requesting and releasing the DMA channel handle.
+ */
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+						struct iio_dev *indio_dev,
+						struct dma_chan *chan,
+						enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
 
-	return iio_device_attach_buffer(indio_dev, buffer);
+	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
+					buffer);
 }
-EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
+		     "IIO_DMAENGINE_BUFFER");
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
 MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMA_BUFFER");
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index fb58f599a80b..526b2a8d725d 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -52,6 +52,7 @@ static const struct iio_buffer_access_funcs iio_hw_buf_access = {
 static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
 	struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
 {
+	unsigned int mask_longs = BITS_TO_LONGS(iio_get_masklength(indio_dev));
 	struct hw_consumer_buffer *buf;
 
 	list_for_each_entry(buf, &hwc->buffers, head) {
@@ -59,8 +60,7 @@ static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
 			return buf;
 	}
 
-	buf = kzalloc(struct_size(buf, scan_mask, BITS_TO_LONGS(indio_dev->masklength)),
-		      GFP_KERNEL);
+	buf = kzalloc(struct_size(buf, scan_mask, mask_longs), GFP_KERNEL);
 	if (!buf)
 		return NULL;
 
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 05b285f0eb22..38034c8bcc04 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -287,4 +287,5 @@ int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_iio_kfifo_buffer_setup_ext);
 
+MODULE_DESCRIPTION("Industrial I/O buffering based on kfifo");
 MODULE_LICENSE("GPL");
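
The DMA-buffer helpers above are now exported in the "IIO_DMA_BUFFER" symbol namespace, so any module calling them must import it, as industrialio-buffer-dmaengine.c does with MODULE_IMPORT_NS("IIO_DMA_BUFFER"). For the new *_with_handle variant, a hedged sketch of the intended calling pattern, with all driver-side names hypothetical:

	#include <linux/dmaengine.h>
	#include <linux/iio/iio.h>
	#include <linux/iio/buffer-dmaengine.h>

	/*
	 * Hypothetical output-driver fragment: the caller already owns the
	 * DMA channel (e.g. handed down by a parent device) and stays
	 * responsible for releasing it; only the buffer is devm-managed,
	 * per the kernel-doc of the _with_handle helper above.
	 */
	static int example_dac_setup_buffer(struct device *dev,
					    struct iio_dev *indio_dev,
					    struct dma_chan *tx_chan)
	{
		return devm_iio_dmaengine_buffer_setup_with_handle(dev,
				indio_dev, tx_chan, IIO_BUFFER_DIRECTION_OUT);
	}

Such a consumer would likewise need MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER") to reach these exports at module load.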