Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h  113
1 file changed, 93 insertions(+), 20 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..6fd9390ccf91 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -22,6 +22,7 @@
#define LINUX_DMAENGINE_H
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
@@ -45,13 +46,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
/**
* enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
* @DMA_IN_PROGRESS: transaction not yet processed
* @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
- DMA_SUCCESS,
+ DMA_COMPLETE,
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
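
With DMA_SUCCESS gone, status checks compare against DMA_COMPLETE instead. A minimal sketch of a client-side poll, using the existing dma_async_is_tx_complete() helper from this header; the function name wait_for_xfer and its chan/cookie arguments are illustrative, not part of the patch:

#include <linux/dmaengine.h>

/* Poll a previously submitted transaction until it leaves the
 * in-progress state; "cookie" is assumed to come from an earlier
 * dmaengine_submit(). */
static int wait_for_xfer(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	/* Completion is now reported as DMA_COMPLETE, not DMA_SUCCESS. */
	return status == DMA_COMPLETE ? 0 : -EIO;
}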
@@ -171,12 +172,6 @@ struct dma_interleaved_template {
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
acknowledges receipt, i.e. has a chance to establish any dependency
* chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- * (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- * (if not set, do the destination dma-unmapping as page)
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
* @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +183,10 @@ struct dma_interleaved_template {
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
- DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
- DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
- DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
- DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
- DMA_PREP_PQ_DISABLE_P = (1 << 6),
- DMA_PREP_PQ_DISABLE_Q = (1 << 7),
- DMA_PREP_CONTINUE = (1 << 8),
- DMA_PREP_FENCE = (1 << 9),
+ DMA_PREP_PQ_DISABLE_P = (1 << 2),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+ DMA_PREP_CONTINUE = (1 << 4),
+ DMA_PREP_FENCE = (1 << 5),
};
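
The surviving flags are still passed to the prep routines unchanged; only their bit positions moved, now that the unmap flags they shared the enum with are gone. A hedged sketch of a typical slave prep call, assuming an already configured channel and mapped buffer (start_tx and its arguments are illustrative):

static int start_tx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	/* Request a completion interrupt and mark the descriptor as
	 * acknowledged so the engine may recycle it immediately. */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}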
/**
@@ -373,6 +364,32 @@ struct dma_slave_config {
unsigned int slave_id;
};
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
+ * DMA channel can only tell whether a descriptor has been completed or not;
+ * the residue field of struct dma_tx_state will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ * completed segment of the transfer (for cyclic transfers this is after each
+ * period). This is typically implemented by having the hardware generate an
+ * interrupt after each transferred segment, after which the driver updates the
+ * outstanding residue by the size of the segment. Another possibility is if
+ * the hardware supports scatter-gather and the segment descriptor has a field
+ * which gets set after the segment has been completed. The driver then counts
+ * the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ * burst. This is typically only supported if the hardware has a progress
+ * register of some sort (e.g. a register with the current read/write address
+ * or a register with the number of bursts/beats/bytes that have been
+ * transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
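
A client can discover the granularity through dma_get_slave_caps(), which fills in the dma_slave_caps structure extended below. A minimal sketch (the helper name chan_reports_burst_residue is illustrative):

static bool chan_reports_burst_residue(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return caps.residue_granularity == DMA_RESIDUE_GRANULARITY_BURST;
}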
+
/* struct dma_slave_caps - expose capabilities of a slave channel only
*
* @src_addr_widths: bit mask of src addr widths the channel supports
@@ -383,6 +400,7 @@ struct dma_slave_config {
* should be checked by controller as well
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
*/
struct dma_slave_caps {
u32 src_addr_widths;
@@ -390,6 +408,7 @@ struct dma_slave_caps {
u32 directions;
bool cmd_pause;
bool cmd_terminate;
+ enum dma_residue_granularity residue_granularity;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -413,6 +432,17 @@ void dma_chan_cleanup(struct kref *kref);
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[0];
+};
+
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
@@ -438,6 +468,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
+ struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -445,6 +476,40 @@ struct dma_async_tx_descriptor {
#endif
};
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+ kref_get(&unmap->kref);
+ tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+ if (tx->unmap) {
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
+ }
+}
+
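Taken together, dmaengine_get_unmap_data(), dma_set_unmap() and dmaengine_unmap_put() replace the old per-flag unmap bookkeeping with a reference-counted object that travels with the descriptor. A sketch of the expected pattern, modeled on the async_tx users of this API and assuming a memcpy-capable channel; the page and device variables, and the addr[] ordering comments, are illustrative:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int sketch_memcpy(struct dma_chan *chan, struct device *dev,
			 struct page *dst, struct page *src, size_t len)
{
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;	/* addr[0] is mapped DMA_TO_DEVICE */
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	unmap->from_cnt = 1;	/* addr[1] is mapped DMA_FROM_DEVICE */
	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->len = len;

	tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
						  unmap->addr[0], len, 0);
	if (tx)
		dma_set_unmap(tx, unmap);	/* takes its own reference */

	/* Drop the allocation's reference; the mappings are released when
	 * the last reference is put (see dma_descriptor_unmap() above). */
	dmaengine_unmap_put(unmap);

	if (!tx)
		return -ENOMEM;
	dmaengine_submit(tx);
	return 0;
}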
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
@@ -979,10 +1044,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
return DMA_IN_PROGRESS;
}
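
For a concrete reading of the window check above (illustrative numbers, not from the patch): cookies are handed out in increasing order, so with last_complete = 5 and last_used = 9 the in-flight cookies are those in (5, 9]; cookie 3 lies outside that window and reports DMA_COMPLETE, while cookie 7 lies inside it and reports DMA_IN_PROGRESS. Once the cookie counter wraps around (last_complete > last_used), the in-flight window itself wraps, which is the case the else branch handles.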
@@ -1004,6 +1069,8 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+ const char *name);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
@@ -1013,11 +1080,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
@@ -1027,6 +1094,11 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
{
return NULL;
}
+static inline struct dma_chan *dma_request_slave_channel_reason(
+ struct device *dev, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
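
Unlike dma_request_slave_channel(), which flattens every failure to NULL, the _reason variant returns an ERR_PTR (hence the -ENODEV stub above), so callers can tell a genuinely missing channel apart from a provider that is not ready yet. A hedged probe-time sketch; the "rx" channel name and request_rx_chan are illustrative:

#include <linux/err.h>
#include <linux/dmaengine.h>

static int request_rx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*out = chan;
	return 0;
}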
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
const char *name)
{
@@ -1043,6 +1115,7 @@ int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \