From dda510890498b9a2f4b2142192f6d516c6c1e2e5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 6 Dec 2019 14:24:35 +0100 Subject: dmaengine: Remove spaces before TABs Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20191206132435.29139-1-geert+renesas@glider.be Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8fcdee1c0cf9..dfd2d35b64af 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -481,7 +481,7 @@ struct dmaengine_unmap_data { * @cookie: tracking cookie for this transaction, set to -EBUSY if * this tx is sitting on a dependency list * @flags: flags to augment operation preparation, control completion, and - * communicate status + * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the -- cgit From dae7a589c18a4d979d5f14b09374e871b995ceb1 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 16 Dec 2019 12:01:16 -0700 Subject: dmaengine: Store module owner in dma_device struct dma_chan_to_owner() dereferences the driver from the struct device to obtain the owner and call module_[get|put](). However, if the backing device is unbound before the dma_device is unregistered, the driver will be cleared and this will cause a NULL pointer dereference. Instead, store a pointer to the owner module in the dma_device struct so the module reference can be properly put when the channel is put, even if the backing device was destroyed first. This change helps to support a safer unbind of DMA engines. If the dma_device is unregistered in the driver's remove function, there's no guarantee that there are no existing clients and a users action may trigger the WARN_ONCE in dma_async_device_unregister() which is unlikely to leave the system in a consistent state. Instead, a better approach is to allow the backing driver to go away and fail any subsequent requests to it. Signed-off-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20191216190120.21374-2-logang@deltatee.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index dfd2d35b64af..11b15a2e97a0 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -674,6 +674,7 @@ struct dma_filter { * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api + * @owner: owner module (automatically set based on the provided dev) * @src_addr_widths: bit mask of src addr widths the device supports * Width is specified in bytes, e.g. for a device supporting * a width of 4 the mask should have BIT(4) set. @@ -737,6 +738,7 @@ struct dma_device { int dev_id; struct device *dev; + struct module *owner; u32 src_addr_widths; u32 dst_addr_widths; -- cgit From 8ad342a863590b24ce77681b7e081363fb3333f7 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 16 Dec 2019 12:01:19 -0700 Subject: dmaengine: Add reference counting to dma_device struct Adding a reference count helps drivers to properly implement the unbind while in use case. References are taken and put every time a channel is allocated or freed. 
Once the final reference is put, the device is removed from the dma_device_list and a release callback function is called to signal the driver to free the memory. Signed-off-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20191216190120.21374-5-logang@deltatee.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 11b15a2e97a0..7927731e3716 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -719,9 +719,14 @@ struct dma_filter { * will just return a simple status code * @device_issue_pending: push pending transactions to hardware * @descriptor_reuse: a submitted transfer can be resubmitted after completion + * @device_release: called sometime after dma_async_device_unregister() is + * called and there are no further references to this structure. This + * must be implemented to free resources; however, many existing drivers + * do not, and are therefore not safe to unbind while in use. + * */ struct dma_device { - + struct kref ref; unsigned int chancnt; unsigned int privatecnt; struct list_head channels; @@ -802,6 +807,7 @@ struct dma_device { dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); + void (*device_release)(struct dma_device *dev); }; static inline int dmaengine_slave_config(struct dma_chan *chan, -- cgit From 3277e8aa2504d97e022ecb9777d784ac1a439d36 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Wed, 15 Jan 2020 10:07:27 -0800 Subject: soc: ti: k3: add navss ringacc driver The Ring Accelerator (RINGACC or RA) provides hardware acceleration to enable straightforward passing of work between a producer and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs. The RINGACC converts constant-address read and write accesses to equivalent read or write accesses to a circular data structure in memory. The RINGACC eliminates the need for each DMA controller that accesses ring elements to know the current state of the ring (base address, current offset). The DMA controller performs a read or write access to a specific address range (which maps to the source interface on the RINGACC) and the RINGACC replaces the address for the transaction with a new address which corresponds to the head or tail element of the ring (head for reads, tail for writes). Since the RINGACC maintains the state, multiple DMA controllers or channels are allowed to coherently share the same rings as applicable. The RINGACC is able to place data which is destined towards software into cached memory directly. Supported ring modes: - Ring Mode - Messaging Mode - Credentials Mode - Queue Manager Mode TI-SCI integration: Texas Instruments' System Control Interface (TI-SCI) Message Protocol now has control over Ringacc module resource management (RM) and ring configuration. The corresponding support for the TI-SCI Ringacc module RM protocol is introduced as an option through DT parameters: - ti,sci: phandle on TI-SCI firmware controller DT node - ti,sci-dev-id: TI-SCI device identifier as per TI-SCI firmware spec If both parameters are present, the Ringacc driver will configure/free/reset rings using the TI-SCI Message Ringacc RM Protocol. The Ringacc driver now manages ring allocation by itself and requests TI-SCI firmware to allocate and configure specific rings only.
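As a purely illustrative sketch of the client-side flow these interfaces enable (the device-tree property name, ring size and element value below are placeholders, and error handling is abbreviated):

	static int example_use_ring(struct device_node *np)
	{
		struct k3_ring_cfg cfg = {
			.size = 128,
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ringacc *ringacc;
		struct k3_ring *ring;
		u64 elem = 0;
		int ret;

		/* Look up the RA instance ("ti,ringacc" is a placeholder property) */
		ringacc = of_k3_ringacc_get_by_phandle(np, "ti,ringacc");
		if (IS_ERR(ringacc))
			return PTR_ERR(ringacc);

		/* Request any general purpose ring */
		ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
		if (!ring)
			return -ENODEV;

		/* Configure the ring (this also allocates the ring memory)... */
		ret = k3_ringacc_ring_cfg(ring, &cfg);
		if (!ret)
			/* ...and push one 8-byte element to the ring tail */
			ret = k3_ringacc_ring_push(ring, &elem);

		k3_ringacc_ring_free(ring);
		return ret;
	}
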
It's done this way because, Linux driver implements two stage Rings allocation and configuration (allocate ring and configure ring) while TI-SCI Message Protocol supports only one combined operation (allocate+configure). Signed-off-by: Grygorii Strashko Signed-off-by: Peter Ujfalusi Reviewed-by: Tero Kristo Tested-by: Keerthy Signed-off-by: Santosh Shilimkar --- include/linux/soc/ti/k3-ringacc.h | 244 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 244 insertions(+) create mode 100644 include/linux/soc/ti/k3-ringacc.h (limited to 'include/linux') diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h new file mode 100644 index 000000000000..26f73df0a524 --- /dev/null +++ b/include/linux/soc/ti/k3-ringacc.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * K3 Ring Accelerator (RA) subsystem interface + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef __SOC_TI_K3_RINGACC_API_H_ +#define __SOC_TI_K3_RINGACC_API_H_ + +#include + +struct device_node; + +/** + * enum k3_ring_mode - &struct k3_ring_cfg mode + * + * RA ring operational modes + * + * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access + * @K3_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires + * that all accesses to the queue must go through this IP so that all + * accesses to the memory are controlled and ordered. This IP then + * controls the entire state of the queue, and SW has no directly control, + * such as through doorbells and cannot access the storage memory directly. + * This is particularly useful when more than one SW or HW entity can be + * the producer and/or consumer at the same time + * @K3_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus + * stores credentials with each message, requiring the element size to be + * doubled to fit the credentials. Any exposed memory should be protected + * by a firewall from unwanted access + */ +enum k3_ring_mode { + K3_RINGACC_RING_MODE_RING = 0, + K3_RINGACC_RING_MODE_MESSAGE, + K3_RINGACC_RING_MODE_CREDENTIALS, + K3_RINGACC_RING_MODE_INVALID +}; + +/** + * enum k3_ring_size - &struct k3_ring_cfg elm_size + * + * RA ring element's sizes in bytes. + */ +enum k3_ring_size { + K3_RINGACC_RING_ELSIZE_4 = 0, + K3_RINGACC_RING_ELSIZE_8, + K3_RINGACC_RING_ELSIZE_16, + K3_RINGACC_RING_ELSIZE_32, + K3_RINGACC_RING_ELSIZE_64, + K3_RINGACC_RING_ELSIZE_128, + K3_RINGACC_RING_ELSIZE_256, + K3_RINGACC_RING_ELSIZE_INVALID +}; + +struct k3_ringacc; +struct k3_ring; + +/** + * enum k3_ring_cfg - RA ring configuration structure + * + * @size: Ring size, number of elements + * @elm_size: Ring element size + * @mode: Ring operational mode + * @flags: Ring configuration flags. Possible values: + * @K3_RINGACC_RING_SHARED: when set allows to request the same ring + * few times. It's usable when the same ring is used as Free Host PD ring + * for different flows, for example. 
+ * Note: Locking should be done by consumer if required + */ +struct k3_ring_cfg { + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; +#define K3_RINGACC_RING_SHARED BIT(1) + u32 flags; +}; + +#define K3_RINGACC_RING_ID_ANY (-1) + +/** + * of_k3_ringacc_get_by_phandle - find a RA by phandle property + * @np: device node + * @property: property name containing phandle on RA node + * + * Returns pointer on the RA - struct k3_ringacc + * or -ENODEV if not found, + * or -EPROBE_DEFER if not yet registered + */ +struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, + const char *property); + +#define K3_RINGACC_RING_USE_PROXY BIT(1) + +/** + * k3_ringacc_request_ring - request ring from ringacc + * @ringacc: pointer on ringacc + * @id: ring id or K3_RINGACC_RING_ID_ANY for any general purpose ring + * @flags: + * @K3_RINGACC_RING_USE_PROXY: if set - proxy will be allocated and + * used to access ring memory. Supported only for rings in + * Message/Credentials/Queue mode. + * + * Returns pointer on the Ring - struct k3_ring + * or NULL in case of failure. + */ +struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, + int id, u32 flags); + +/** + * k3_ringacc_ring_reset - ring reset + * @ring: pointer on Ring + * + * Resets ring internal state ((hw)occ, (hw)idx). + */ +void k3_ringacc_ring_reset(struct k3_ring *ring); +/** + * k3_ringacc_ring_reset_dma - ring reset for DMA rings + * @ring: pointer on Ring + * + * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings + * which are read by K3 UDMA, like TX or Free Host PD rings. + */ +void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ); + +/** + * k3_ringacc_ring_free - ring free + * @ring: pointer on Ring + * + * Resets the ring and frees all allocated resources. + */ +int k3_ringacc_ring_free(struct k3_ring *ring); + +/** + * k3_ringacc_get_ring_id - Get the Ring ID + * @ring: pointer on ring + * + * Returns the Ring ID + */ +u32 k3_ringacc_get_ring_id(struct k3_ring *ring); + +/** + * k3_ringacc_get_ring_irq_num - Get the irq number for the ring + * @ring: pointer on ring + * + * Returns the interrupt number which can be used to request the interrupt + */ +int k3_ringacc_get_ring_irq_num(struct k3_ring *ring); + +/** + * k3_ringacc_ring_cfg - ring configure + * @ring: pointer on ring + * @cfg: Ring configuration parameters (see &struct k3_ring_cfg) + * + * Configures ring, including ring memory allocation. + * Returns 0 on success, errno otherwise. + */ +int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg); + +/** + * k3_ringacc_ring_get_size - get ring size + * @ring: pointer on ring + * + * Returns ring size in number of elements. + */ +u32 k3_ringacc_ring_get_size(struct k3_ring *ring); + +/** + * k3_ringacc_ring_get_free - get free elements + * @ring: pointer on ring + * + * Returns number of free elements in the ring. + */ +u32 k3_ringacc_ring_get_free(struct k3_ring *ring); + +/** + * k3_ringacc_ring_get_occ - get ring occupancy + * @ring: pointer on ring + * + * Returns total number of valid entries on the ring + */ +u32 k3_ringacc_ring_get_occ(struct k3_ring *ring); + +/** + * k3_ringacc_ring_is_full - checks if ring is full + * @ring: pointer on ring + * + * Returns true if the ring is full + */ +u32 k3_ringacc_ring_is_full(struct k3_ring *ring); + +/** + * k3_ringacc_ring_push - push element to the ring tail + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Push one ring element to the ring tail.
Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + */ +int k3_ringacc_ring_push(struct k3_ring *ring, void *elem); + +/** + * k3_ringacc_ring_pop - pop element from the ring head + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Pop one ring element from the ring head. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + */ +int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem); + +/** + * k3_ringacc_ring_push_head - push element to the ring head + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Push one ring element to the ring head. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING + */ +int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem); + +/** + * k3_ringacc_ring_pop_tail - pop element from the ring tail + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Pop one ring element from the ring tail. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING + */ +int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem); + +u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring); + +#endif /* __SOC_TI_K3_RINGACC_API_H_ */ -- cgit From 4db8fd32ed2be7cc510e51e43ec3349aa64074a9 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:44 +0200 Subject: dmaengine: Add metadata_ops for dma_async_tx_descriptor The metadata is best described as side-band data or parameters traveling alongside the data DMA'd by the DMA engine. It is data which is understood only by the peripheral and the peripheral driver; the DMA engine sees it only as a data block and does not interpret it in any way. The metadata can be different per descriptor as it is a parameter for the data being transferred. If the DMA supports per-descriptor metadata it can implement the attach, get_ptr/set_len callbacks. Client drivers must only use either attach or get_ptr/set_len to avoid misconfiguration. Client drivers can check whether a given metadata mode is supported by the channel at probe time with dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT); dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_ENGINE); and based on this information can use either mode. Wrappers are also added for the metadata_ops.
To be used in DESC_METADATA_CLIENT mode: dmaengine_desc_attach_metadata() To be used in DESC_METADATA_ENGINE mode: dmaengine_desc_get_metadata_ptr() dmaengine_desc_set_metadata_len() Signed-off-by: Peter Ujfalusi Reviewed-by: Tero Kristo Tested-by: Keerthy Reviewed-by: Grygorii Strashko Link: https://lore.kernel.org/r/20191223110458.30766-5-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 112 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 7927731e3716..68b361891a61 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; * @bytes_transferred: byte counter */ +/** + * enum dma_desc_metadata_mode - per descriptor metadata mode types supported + * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the + * client driver and it is attached (via the dmaengine_desc_attach_metadata() + * helper) to the descriptor. + * + * Client drivers interested to use this mode can follow: + * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * construct the metadata in the client's buffer + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the + * descriptor + * 3. submit the transfer + * - DMA_DEV_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the + * descriptor + * 3. submit the transfer + * 4. when the transfer is completed, the metadata should be available in the + * attached buffer + * + * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA + * driver. The client driver can ask for the pointer, maximum size and the + * currently used size of the metadata and can directly update or read it. + * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is + * provided as helper functions. + * + * Note: the metadata area for the descriptor is no longer valid after the + * transfer has been completed (valid up to the point when the completion + * callback returns if used). + * + * Client drivers interested to use this mode can follow: + * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's + * metadata area + * 3. update the metadata at the pointer + * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount + * of data the client has placed into the metadata buffer + * 5. submit the transfer + * - DMA_DEV_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. submit the transfer + * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the + * pointer to the engine's metadata area + * 4. Read out the metadata from the pointer + * + * Note: the two mode is not compatible and clients must use one mode for a + * descriptor. 
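+ *
+ * A purely illustrative DESC_METADATA_CLIENT sequence (the prep call,
+ * buffer and length below are placeholders):
+ *
+ *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV, 0);
+ *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
+ *	if (!ret)
+ *		dmaengine_submit(desc);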
+ */ +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = BIT(0), + DESC_METADATA_ENGINE = BIT(1), +}; + struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; @@ -475,6 +531,18 @@ struct dmaengine_unmap_data { dma_addr_t addr[0]; }; +struct dma_async_tx_descriptor; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *desc, void *data, + size_t len); + + void *(*get_ptr)(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len); + int (*set_len)(struct dma_async_tx_descriptor *desc, + size_t payload_len); +}; + /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- @@ -488,6 +556,11 @@ struct dmaengine_unmap_data { * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine + * @desc_metadata_mode: core managed metadata mode to protect mixed use of + * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise + * DESC_METADATA_NONE + * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the + * DMA driver if metadata mode is supported with the descriptor * ---async_tx api specific fields--- * @next: at completion submit this descriptor * @parent: pointer to the next level up in the dependency chain @@ -504,6 +577,8 @@ struct dma_async_tx_descriptor { dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -666,6 +741,7 @@ struct dma_filter { * @global_node: list_head for global dma_device_list * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags + * @desc_metadata_modes: supported metadata modes by the DMA device * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability * @copy_align: alignment shift for memcpy operations @@ -733,6 +809,7 @@ struct dma_device { struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; @@ -910,6 +987,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( len, flags); } +static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, + enum dma_desc_metadata_mode mode) +{ + if (!chan) + return false; + + return !!(chan->device->desc_metadata_modes & mode); +} + +#ifdef CONFIG_DMA_ENGINE +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len); +void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len); +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len); +#else /* CONFIG_DMA_ENGINE */ +static inline int dmaengine_desc_attach_metadata( + struct dma_async_tx_descriptor *desc, void *data, size_t len) +{ + return -EINVAL; +} +static inline void *dmaengine_desc_get_metadata_ptr( + struct dma_async_tx_descriptor *desc, size_t *payload_len, + size_t *max_len) +{ + return NULL; +} +static inline int dmaengine_desc_set_metadata_len( + 
struct dma_async_tx_descriptor *desc, size_t payload_len) +{ + return -EINVAL; +} +#endif /* CONFIG_DMA_ENGINE */ + /** * dmaengine_terminate_all() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers -- cgit From 6755ec06d1333765d2b935e4e4a5bd011332bac6 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:45 +0200 Subject: dmaengine: Add support for reporting DMA cached data amount DMA hardware can have a big cache or FIFO, and the amount of data sitting in the DMA fabric can be of interest to the clients. For example, in audio we want to know the delay in the data flow, and in case the DMA has a significantly large FIFO/cache, it can affect the latency/delay. Signed-off-by: Peter Ujfalusi Reviewed-by: Tero Kristo Tested-by: Keerthy Reviewed-by: Grygorii Strashko Link: https://lore.kernel.org/r/20191223110458.30766-6-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 68b361891a61..b44b9c608709 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -686,11 +686,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr * @residue: the remaining number of bytes left to transmit * on the selected transfer for states DMA_IN_PROGRESS and * DMA_PAUSED if this is implemented in the driver, else 0 + * @in_flight_bytes: amount of data in bytes cached by the DMA. */ struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; + u32 in_flight_bytes; }; /** -- cgit From 816ebf48442eef1c61db26d2ec055f5c8ac83b21 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:46 +0200 Subject: dmaengine: Add helper function to convert direction value to text dmaengine_get_direction_text() can be useful when the direction is printed out. The text is easier to comprehend than the number. Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20191223110458.30766-7-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b44b9c608709..62225d46908b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1545,4 +1545,23 @@ static inline struct dma_chan return __dma_request_channel(mask, fn, fn_param, NULL); } + +static inline char * +dmaengine_get_direction_text(enum dma_transfer_direction dir) +{ + switch (dir) { + case DMA_DEV_TO_MEM: + return "DEV_TO_MEM"; + case DMA_MEM_TO_DEV: + return "MEM_TO_DEV"; + case DMA_MEM_TO_MEM: + return "MEM_TO_MEM"; + case DMA_DEV_TO_DEV: + return "DEV_TO_DEV"; + default: + break; + } + + return "invalid"; +} #endif /* DMAENGINE_H */ -- cgit From 69bafc318560d02dc07e3b52af0d7cf5fc036bfe Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:47 +0200 Subject: dmaengine: ti: Add cppi5 header for K3 NAVSS/UDMA The K3 DMA architecture uses CPPI5 (Communications Port Programming Interface) specified descriptors over the PSI-L bus within NAVSS. The header provides helpers and macros to work with these descriptors in a consistent way.
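As a purely illustrative sketch, a transmit host descriptor could be prepared with these helpers roughly as follows (the packet type, flow id, buffer values and return ring id are placeholders):

	static void example_setup_tx_hdesc(struct cppi5_host_desc_t *desc,
					   dma_addr_t buf, u32 len,
					   u32 ret_ring_id)
	{
		/* Host descriptor with EPIB, no protocol specific data */
		cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 0);
		cppi5_hdesc_set_pkttype(desc, 0x7);	/* placeholder packet type */
		cppi5_hdesc_set_pktlen(desc, len);
		/* Single buffer packet: buffer and original buffer are the same */
		cppi5_hdesc_attach_buf(desc, buf, len, buf, len);
		cppi5_desc_set_pktids(&desc->hdr, 0,
				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
		cppi5_desc_set_retpolicy(&desc->hdr, 0, ret_ring_id);
	}

The required descriptor size can be obtained up front with cppi5_hdesc_calc_size().
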
Signed-off-by: Peter Ujfalusi Tested-by: Keerthy Reviewed-by: Grygorii Strashko Link: https://lore.kernel.org/r/20191223110458.30766-8-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- include/linux/dma/ti-cppi5.h | 1059 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1059 insertions(+) create mode 100644 include/linux/dma/ti-cppi5.h (limited to 'include/linux') diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h new file mode 100644 index 000000000000..579356ae447e --- /dev/null +++ b/include/linux/dma/ti-cppi5.h @@ -0,0 +1,1059 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CPPI5 descriptors interface + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef __TI_CPPI5_H__ +#define __TI_CPPI5_H__ + +#include +#include +#include + +/** + * struct cppi5_desc_hdr_t - Descriptor header, present in all types of + * descriptors + * @pkt_info0: Packet info word 0 (n/a in Buffer desc) + * @pkt_info0: Packet info word 1 (n/a in Buffer desc) + * @pkt_info0: Packet info word 2 (n/a in Buffer desc) + * @src_dst_tag: Packet info word 3 (n/a in Buffer desc) + */ +struct cppi5_desc_hdr_t { + u32 pkt_info0; + u32 pkt_info1; + u32 pkt_info2; + u32 src_dst_tag; +} __packed; + +/** + * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition + * @hdr: Descriptor header + * @next_desc: word 4/5: Linking word + * @buf_ptr: word 6/7: Buffer pointer + * @buf_info1: word 8: Buffer valid data length + * @org_buf_len: word 9: Original buffer length + * @org_buf_ptr: word 10/11: Original buffer pointer + * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or + * Protocol Specific Data (optional, 0-128 bytes in + * multiples of 4), and/or + * Other Software Data (0-N bytes, optional) + */ +struct cppi5_host_desc_t { + struct cppi5_desc_hdr_t hdr; + u64 next_desc; + u64 buf_ptr; + u32 buf_info1; + u32 org_buf_len; + u64 org_buf_ptr; + u32 epib[0]; +} __packed; + +#define CPPI5_DESC_MIN_ALIGN (16U) + +#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U) +#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U) + +#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U) +#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30) +#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U) +#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U) +#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U) +#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29) +/* + * Protocol Specific Words location: + * 0 - located in the descriptor, + * 1 = located in the SOP Buffer immediately prior to the data. 
+ */ +#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28) +#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U) +#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22) +#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0) +#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0) + +#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U) +#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28) +#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U) +#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24) +#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U) +#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14) +#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0) +#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0) +#define CPPI5_INFO1_DESC_FLOWID_DEFAULT CPPI5_INFO1_DESC_FLOWID_MASK + +#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U) +#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27) +/* Return Policy: 0 - Entire packet 1 - Each buffer */ +#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18) +/* + * Early Return: + * 0 = desc pointers should be returned after all reads have been completed + * 1 = desc pointers should be returned immediately upon fetching + * the descriptor and beginning to transfer data. + */ +#define CPPI5_INFO2_HDESC_EARLYRET BIT(17) +/* + * Return Push Policy: + * 0 = Descriptor must be returned to tail of queue + * 1 = Descriptor must be returned to head of queue + */ +#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16) +#define CPPI5_INFO2_DESC_RETP_MASK GENMASK(18, 16) + +#define CPPI5_INFO2_DESC_RETQ_SHIFT (0) +#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0) + +#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U) +#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16) +#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0) +#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0) + +#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0) +#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0) + +#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0) +#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0) + +/** + * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block + * @timestamp: word 0: application specific timestamp + * @sw_info0: word 1: Software Info 0 + * @sw_info1: word 1: Software Info 1 + * @sw_info2: word 1: Software Info 2 + */ +struct cppi5_desc_epib_t { + u32 timestamp; /* w0: application specific timestamp */ + u32 sw_info0; /* w1: Software Info 0 */ + u32 sw_info1; /* w2: Software Info 1 */ + u32 sw_info2; /* w3: Software Info 2 */ +}; + +/** + * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor + * @hdr: Descriptor header + * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or + * Protocol Specific Data (optional, 0-128 bytes in + * multiples of 4), and/or + * Other Software Data (0-N bytes, optional) + */ +struct cppi5_monolithic_desc_t { + struct cppi5_desc_hdr_t hdr; + u32 epib[0]; +}; + +#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U) +#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18) + +/* + * Reload Count: + * 0 = Finish the packet and place the descriptor back on the return queue + * 1-0x1ff = Vector to the Reload Index and resume processing + * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped + */ +#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U) +#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20) +#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff) +#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX +#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U) +#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14) +#define CPPI5_INFO0_TRDESC_RLDIDX_MAX 
(0x3f) +#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0) +#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0) + +#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U) +#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U) + +static inline void cppi5_desc_dump(void *desc, u32 size) +{ + print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE, + 32, 4, desc, size, false); +} + +#define CPPI5_TDCM_MARKER (0x1) +/** + * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message + * @paddr: Physical address of the packet popped from the ring + * + * Returns true if the address indicates TDCM + */ +static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr) +{ + return (paddr & CPPI5_TDCM_MARKER) ? true : false; +} + +/** + * cppi5_desc_get_type - get descriptor type + * @desc_hdr: packet descriptor/TR header + * + * Returns descriptor type: + * CPPI5_INFO0_DESC_TYPE_VAL_HOST + * CPPI5_INFO0_DESC_TYPE_VAL_MONO + * CPPI5_INFO0_DESC_TYPE_VAL_TR + */ +static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr) +{ + return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >> + CPPI5_INFO0_HDESC_TYPE_SHIFT; +} + +/** + * cppi5_desc_get_errflags - get Error Flags from Desc + * @desc_hdr: packet/TR descriptor header + * + * Returns Error Flags from Packet/TR Descriptor + */ +static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr) +{ + return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >> + CPPI5_INFO1_DESC_PKTERROR_SHIFT; +} + +/** + * cppi5_desc_get_pktids - get Packet and Flow ids from Desc + * @desc_hdr: packet/TR descriptor header + * @pkt_id: Packet ID + * @flow_id: Flow ID + * + * Returns Packet and Flow ids from packet/TR descriptor + */ +static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr, + u32 *pkt_id, u32 *flow_id) +{ + *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >> + CPPI5_INFO1_DESC_PKTID_SHIFT; + *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >> + CPPI5_INFO1_DESC_FLOWID_SHIFT; +} + +/** + * cppi5_desc_set_pktids - set Packet and Flow ids in Desc + * @desc_hdr: packet/TR descriptor header + * @pkt_id: Packet ID + * @flow_id: Flow ID + */ +static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr, + u32 pkt_id, u32 flow_id) +{ + desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK | + CPPI5_INFO1_DESC_FLOWID_MASK); + desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) & + CPPI5_INFO1_DESC_PKTID_MASK; + desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) & + CPPI5_INFO1_DESC_FLOWID_MASK; +} + +/** + * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc + * @desc_hdr: packet/TR descriptor header + * @flags: fags, supported values + * CPPI5_INFO2_HDESC_RETPOLICY + * CPPI5_INFO2_HDESC_EARLYRET + * CPPI5_INFO2_DESC_RETPUSHPOLICY + * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved + */ +static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr, + u32 flags, u32 return_ring_id) +{ + desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK | + CPPI5_INFO2_DESC_RETQ_MASK); + desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK; + desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK; +} + +/** + * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc + * @desc_hdr: packet/TR 
descriptor header + * @src_tag_id: Source Tag + * @dst_tag_id: Dest Tag + * + * Returns Packet Src/Dst Tags from packet/TR descriptor + */ +static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr, + u32 *src_tag_id, u32 *dst_tag_id) +{ + if (src_tag_id) + *src_tag_id = (desc_hdr->src_dst_tag & + CPPI5_INFO3_DESC_SRCTAG_MASK) >> + CPPI5_INFO3_DESC_SRCTAG_SHIFT; + if (dst_tag_id) + *dst_tag_id = desc_hdr->src_dst_tag & + CPPI5_INFO3_DESC_DSTTAG_MASK; +} + +/** + * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc + * @desc_hdr: packet/TR descriptor header + * @src_tag_id: Source Tag + * @dst_tag_id: Dest Tag + * + * Returns Packet Src/Dst Tags from packet/TR descriptor + */ +static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr, + u32 src_tag_id, u32 dst_tag_id) +{ + desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) & + CPPI5_INFO3_DESC_SRCTAG_MASK; + desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK; +} + +/** + * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size + * @epib: is EPIB present + * @psdata_size: PSDATA size + * @sw_data_size: SWDATA size + * + * Returns required Host Packet Descriptor size + * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE + */ +static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size, + u32 sw_data_size) +{ + u32 desc_size; + + if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE) + return 0; + + desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size + + sw_data_size; + + if (epib) + desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE; + + return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN); +} + +/** + * cppi5_hdesc_init - Init Host Packet Descriptor size + * @desc: Host packet descriptor + * @flags: supported values + * CPPI5_INFO0_HDESC_EPIB_PRESENT + * CPPI5_INFO0_HDESC_PSINFO_LOCATION + * @psdata_size: PSDATA size + * + * Returns required Host Packet Descriptor size + * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE + */ +static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags, + u32 psdata_size) +{ + desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST << + CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags); + desc->hdr.pkt_info0 |= ((psdata_size >> 2) << + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; + desc->next_desc = 0; +} + +/** + * cppi5_hdesc_update_flags - Replace descriptor flags + * @desc: Host packet descriptor + * @flags: supported values + * CPPI5_INFO0_HDESC_EPIB_PRESENT + * CPPI5_INFO0_HDESC_PSINFO_LOCATION + */ +static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc, + u32 flags) +{ + desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT | + CPPI5_INFO0_HDESC_PSINFO_LOCATION); + desc->hdr.pkt_info0 |= flags; +} + +/** + * cppi5_hdesc_update_psdata_size - Replace PSdata size + * @desc: Host packet descriptor + * @psdata_size: PSDATA size + */ +static inline void +cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size) +{ + desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; + desc->hdr.pkt_info0 |= ((psdata_size >> 2) << + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; +} + +/** + * cppi5_hdesc_get_psdata_size - get PSdata size in bytes + * @desc: Host packet descriptor + */ +static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size = 0; + + if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)) + psdata_size = (desc->hdr.pkt_info0 & + 
CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + return (psdata_size << 2); +} + +/** + * cppi5_hdesc_get_pktlen - get Packet Length from HDesc + * @desc: Host packet descriptor + * + * Returns Packet Length from Host Packet Descriptor + */ +static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc) +{ + return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK); +} + +/** + * cppi5_hdesc_set_pktlen - set Packet Length in HDesc + * @desc: Host packet descriptor + */ +static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc, + u32 pkt_len) +{ + desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK; + desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK); +} + +/** + * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc + * @desc: Host packet descriptor + * + * Returns Protocol Specific Flags from Host Packet Descriptor + */ +static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc) +{ + return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >> + CPPI5_INFO1_HDESC_PSFLGS_SHIFT; +} + +/** + * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc + * @desc: Host packet descriptor + */ +static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc, + u32 ps_flags) +{ + desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK; + desc->hdr.pkt_info1 |= (ps_flags << + CPPI5_INFO1_HDESC_PSFLGS_SHIFT) & + CPPI5_INFO1_HDESC_PSFLGS_MASK; +} + +/** + * cppi5_hdesc_get_errflags - get Packet Type from HDesc + * @desc: Host packet descriptor + */ +static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc) +{ + return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >> + CPPI5_INFO2_HDESC_PKTTYPE_SHIFT; +} + +/** + * cppi5_hdesc_get_errflags - set Packet Type in HDesc + * @desc: Host packet descriptor + * @pkt_type: Packet Type + */ +static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc, + u32 pkt_type) +{ + desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK; + desc->hdr.pkt_info2 |= + (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) & + CPPI5_INFO2_HDESC_PKTTYPE_MASK; +} + +/** + * cppi5_hdesc_attach_buf - attach buffer to HDesc + * @desc: Host packet descriptor + * @buf: Buffer physical address + * @buf_data_len: Buffer length + * @obuf: Original Buffer physical address + * @obuf_len: Original Buffer length + * + * Attaches buffer to Host Packet Descriptor + */ +static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc, + dma_addr_t buf, u32 buf_data_len, + dma_addr_t obuf, u32 obuf_len) +{ + desc->buf_ptr = buf; + desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK; + desc->org_buf_ptr = obuf; + desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK; +} + +static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc, + dma_addr_t *obuf, u32 *obuf_len) +{ + *obuf = desc->org_buf_ptr; + *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK; +} + +static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc) +{ + desc->buf_ptr = desc->org_buf_ptr; + desc->buf_info1 = desc->org_buf_len; +} + +/** + * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc + * @desc: Host Packet Descriptor + * @buf_desc: Host Buffer Descriptor physical address + * + * add and link Host Buffer Descriptor to HDesc + */ +static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc, + dma_addr_t hbuf_desc) +{ + desc->next_desc = hbuf_desc; +} + +static inline 
dma_addr_t +cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc) +{ + return (dma_addr_t)desc->next_desc; +} + +static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc) +{ + desc->hdr = (struct cppi5_desc_hdr_t) { 0 }; + desc->next_desc = 0; +} + +/** + * cppi5_hdesc_epib_present - check if EPIB present + * @desc_hdr: packet descriptor/TR header + * + * Returns true if EPIB present in the packet + */ +static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr) +{ + return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT); +} + +/** + * cppi5_hdesc_get_psdata - Get pointer on PSDATA + * @desc: Host packet descriptor + * + * Returns pointer on PSDATA in HDesc. + * NULL - if ps_data placed at the start of data buffer. + */ +static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size; + void *psdata; + + if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION) + return NULL; + + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + if (!psdata_size) + return NULL; + + psdata = &desc->epib; + + if (cppi5_hdesc_epib_present(&desc->hdr)) + psdata += CPPI5_INFO0_HDESC_EPIB_SIZE; + + return psdata; +} + +/** + * cppi5_hdesc_get_swdata - Get pointer on swdata + * @desc: Host packet descriptor + * + * Returns pointer on SWDATA in HDesc. + * NOTE. It's caller responsibility to be sure hdesc actually has swdata. + */ +static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size = 0; + void *swdata; + + if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)) + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + swdata = &desc->epib; + + if (cppi5_hdesc_epib_present(&desc->hdr)) + swdata += CPPI5_INFO0_HDESC_EPIB_SIZE; + + swdata += (psdata_size << 2); + + return swdata; +} + +/* ================================== TR ================================== */ + +#define CPPI5_TR_TYPE_SHIFT (0U) +#define CPPI5_TR_TYPE_MASK GENMASK(3, 0) +#define CPPI5_TR_STATIC BIT(4) +#define CPPI5_TR_WAIT BIT(5) +#define CPPI5_TR_EVENT_SIZE_SHIFT (6U) +#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6) +#define CPPI5_TR_TRIGGER0_SHIFT (8U) +#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8) +#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U) +#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10) +#define CPPI5_TR_TRIGGER1_SHIFT (12U) +#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12) +#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U) +#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14) +#define CPPI5_TR_CMD_ID_SHIFT (16U) +#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16) +#define CPPI5_TR_CSF_FLAGS_SHIFT (24U) +#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24) +#define CPPI5_TR_CSF_SA_INDIRECT BIT(0) +#define CPPI5_TR_CSF_DA_INDIRECT BIT(1) +#define CPPI5_TR_CSF_SUPR_EVT BIT(2) +#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U) +#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4) +#define CPPI5_TR_CSF_EOP BIT(7) + +/** + * enum cppi5_tr_types - TR types + * @CPPI5_TR_TYPE0: One dimensional data move + * @CPPI5_TR_TYPE1: Two dimensional data move + * @CPPI5_TR_TYPE2: Three dimensional data move + * @CPPI5_TR_TYPE3: Four dimensional data move + * @CPPI5_TR_TYPE4: Four dimensional data move with data formatting + * @CPPI5_TR_TYPE5: Four dimensional Cache Warm + * @CPPI5_TR_TYPE8: Four Dimensional Block Move + * @CPPI5_TR_TYPE9: Four Dimensional Block Move with Repacking + * @CPPI5_TR_TYPE10: Two Dimensional 
Block Move + * @CPPI5_TR_TYPE11: Two Dimensional Block Move with Repacking + * @CPPI5_TR_TYPE15: Four Dimensional Block Move with Repacking and + * Indirection + */ +enum cppi5_tr_types { + CPPI5_TR_TYPE0 = 0, + CPPI5_TR_TYPE1, + CPPI5_TR_TYPE2, + CPPI5_TR_TYPE3, + CPPI5_TR_TYPE4, + CPPI5_TR_TYPE5, + /* type6-7: Reserved */ + CPPI5_TR_TYPE8 = 8, + CPPI5_TR_TYPE9, + CPPI5_TR_TYPE10, + CPPI5_TR_TYPE11, + /* type12-14: Reserved */ + CPPI5_TR_TYPE15 = 15, + CPPI5_TR_TYPE_MAX +}; + +/** + * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event + * is generated for each TR. + * @CPPI5_TR_EVENT_SIZE_COMPLETION: When TR is complete and all status for + * the TR has been received + * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC: Type 0: when the last data transaction + * is sent for the TR + * Type 1-11: when ICNT1 is decremented + * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC: Type 0-1,10-11: when the last data + * transaction is sent for the TR + * All other types: when ICNT2 is + * decremented + * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC: Type 0-2,10-11: when the last data + * transaction is sent for the TR + * All other types: when ICNT3 is + * decremented + */ +enum cppi5_tr_event_size { + CPPI5_TR_EVENT_SIZE_COMPLETION, + CPPI5_TR_EVENT_SIZE_ICNT1_DEC, + CPPI5_TR_EVENT_SIZE_ICNT2_DEC, + CPPI5_TR_EVENT_SIZE_ICNT3_DEC, + CPPI5_TR_EVENT_SIZE_MAX +}; + +/** + * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger + * used to enable the TR to transfer data as specified + * by TRIGGERx_TYPE field. + * @CPPI5_TR_TRIGGER_NONE: No trigger + * @CPPI5_TR_TRIGGER_GLOBAL0: Global trigger 0 + * @CPPI5_TR_TRIGGER_GLOBAL1: Global trigger 1 + * @CPPI5_TR_TRIGGER_LOCAL_EVENT: Local Event + */ +enum cppi5_tr_trigger { + CPPI5_TR_TRIGGER_NONE, + CPPI5_TR_TRIGGER_GLOBAL0, + CPPI5_TR_TRIGGER_GLOBAL1, + CPPI5_TR_TRIGGER_LOCAL_EVENT, + CPPI5_TR_TRIGGER_MAX +}; + +/** + * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type + * of data transfer that will be enabled by + * receiving a trigger as specified by TRIGGERx. 
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC: The second inner most loop (ICNT1) will + * be decremented by 1 + * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC: The third inner most loop (ICNT2) will + * be decremented by 1 + * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC: The outer most loop (ICNT3) will be + * decremented by 1 + * @CPPI5_TR_TRIGGER_TYPE_ALL: The entire TR will be allowed to + * complete + */ +enum cppi5_tr_trigger_type { + CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC, + CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, + CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC, + CPPI5_TR_TRIGGER_TYPE_ALL, + CPPI5_TR_TRIGGER_TYPE_MAX +}; + +typedef u32 cppi5_tr_flags_t; + +/** + * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @_reserved: Not used + * @addr: Starting address for the source data or destination data + */ +struct cppi5_tr_type0_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 _reserved; + u64 addr; +} __aligned(16) __packed; + +/** + * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @icnt1: Total loop iteration count for level 1 + * @addr: Starting address for the source data or destination data + * @dim1: Signed dimension for loop level 1 + */ +struct cppi5_tr_type1_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; +} __aligned(32) __packed; + +/** + * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @icnt1: Total loop iteration count for level 1 + * @addr: Starting address for the source data or destination data + * @dim1: Signed dimension for loop level 1 + * @icnt2: Total loop iteration count for level 2 + * @_reserved: Not used + * @dim2: Signed dimension for loop level 2 + */ +struct cppi5_tr_type2_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 _reserved; + s32 dim2; +} __aligned(32) __packed; + +/** + * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @icnt1: Total loop iteration count for level 1 + * @addr: Starting address for the source data or destination data + * @dim1: Signed dimension for loop level 1 + * @icnt2: Total loop iteration count for level 2 + * @icnt3: Total loop iteration count for level 3 (outermost) + * @dim2: Signed dimension for loop level 2 + * @dim3: Signed dimension for loop level 3 + */ +struct cppi5_tr_type3_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 icnt3; + s32 dim2; + s32 dim3; +} __aligned(32) __packed; + +/** + * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with + * Repacking and Indirection Support) TR (64 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) for + * source + * @icnt1: Total loop iteration count for level 1 for source + * @addr: Starting address for the source data + * @dim1: Signed dimension for loop level 1 for source + * @icnt2: Total loop iteration count for level 2 for source + * @icnt3: Total loop iteration count for level 3 (outermost) for + * source 
+ * @dim2: Signed dimension for loop level 2 for source + * @dim3: Signed dimension for loop level 3 for source + * @_reserved: Not used + * @ddim1: Signed dimension for loop level 1 for destination + * @daddr: Starting address for the destination data + * @ddim2: Signed dimension for loop level 2 for destination + * @ddim3: Signed dimension for loop level 3 for destination + * @dicnt0: Total loop iteration count for level 0 (innermost) for + * destination + * @dicnt1: Total loop iteration count for level 1 for destination + * @dicnt2: Total loop iteration count for level 2 for destination + * @sicnt3: Total loop iteration count for level 3 (outermost) for + * destination + */ +struct cppi5_tr_type15_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 icnt3; + s32 dim2; + s32 dim3; + u32 _reserved; + s32 ddim1; + u64 daddr; + s32 ddim2; + s32 ddim3; + u16 dicnt0; + u16 dicnt1; + u16 dicnt2; + u16 dicnt3; +} __aligned(64) __packed; + +/** + * struct cppi5_tr_resp_t - TR response record + * @status: Status type and info + * @_reserved: Not used + * @cmd_id: Command ID for the TR for TR identification + * @flags: Configuration Specific Flags + */ +struct cppi5_tr_resp_t { + u8 status; + u8 _reserved; + u8 cmd_id; + u8 flags; +} __packed; + +#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U) +#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0) +#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U) +#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4) +#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U) +#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16) +#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U) +#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24) + +/** + * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to + * determine what type of status is being + * returned. 
+ * @CPPI5_TR_RESPONSE_STATUS_NONE: No error, completion: completed + * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR: Transfer Error, completion: none + * or partially completed + * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR: Aborted Error, completion: none + * or partially completed + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR: Submission Error, completion: + * none + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR: Unsupported Error, completion: + * none + * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion: + * partially completed + * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH: Teardown Flush, completion: none + */ +enum cppi5_tr_resp_status_type { + CPPI5_TR_RESPONSE_STATUS_NONE, + CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR, + CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR, + CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION, + CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH, + CPPI5_TR_RESPONSE_STATUS_MAX +}; + +/** + * enum cppi5_tr_resp_status_submission - TR Response Status field values which + * corresponds Submission Error + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0 + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR + * received + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN: Channel is not owned by the + * submitter + */ +enum cppi5_tr_resp_status_submission { + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX +}; + +/** + * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which + * corresponds Unsupported Error + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE: TR Type not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC: STATIC not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL: EOL not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC + * not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE: AMODE not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE: ELTYPE not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT: DFMT not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR: SECTR not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field + * not supported + */ +enum cppi5_tr_resp_status_unsupported { + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX +}; + +/** + * cppi5_trdesc_calc_size - Calculate TR Descriptor size + * @tr_count: number of TR records + * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128] + * + * Returns required TR Descriptor size + */ +static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size) +{ + /* + * The Size of a TR descriptor is: + * 1 x tr_size : the first 16 bytes is used by the packet info block + + * tr_count x tr_size : Transfer Request Records + + * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records + */ + return tr_size * (tr_count + 1) + + sizeof(struct cppi5_tr_resp_t) * tr_count; +} + +/** + * 
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ *		through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ *		  indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+				     u32 tr_count, u32 tr_size, u32 reload_idx,
+				     u32 reload_count)
+{
+	desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+			      CPPI5_INFO0_HDESC_TYPE_SHIFT;
+	desc_hdr->pkt_info0 |=
+			(reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+			CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+	desc_hdr->pkt_info0 |=
+			(reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+			CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+	desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+	desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+				CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+				CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specifics)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+				 enum cppi5_tr_types type, bool static_tr,
+				 bool wait, enum cppi5_tr_event_size event_size,
+				 u32 cmd_id)
+{
+	*flags = type;
+	*flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+		  CPPI5_TR_EVENT_SIZE_MASK;
+
+	*flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+		  CPPI5_TR_CMD_ID_MASK;
+
+	if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+		*flags |= CPPI5_TR_STATIC;
+
+	if (wait)
+		*flags |= CPPI5_TR_WAIT;
+}
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+					enum cppi5_tr_trigger trigger0,
+					enum cppi5_tr_trigger_type trigger0_type,
+					enum cppi5_tr_trigger trigger1,
+					enum cppi5_tr_trigger_type trigger1_type)
+{
+	*flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+		    CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+	*flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+		  CPPI5_TR_TRIGGER0_MASK;
+	*flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+		  CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+	*flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+		  CPPI5_TR_TRIGGER1_MASK;
+	*flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+		  CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration specific flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Set a bit in Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+	*flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+	*flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+		  CPPI5_TR_CSF_FLAGS_MASK;
+}
+
+#endif /* __TI_CPPI5_H__ */
-- cgit
From 8c6bb62f6b4a24c446511e8f894251946dcc2ef1 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Mon, 23 Dec 2019 13:04:48 +0200
Subject: dmaengine: ti: k3 PSI-L remote endpoint configuration

In K3 architecture the DMA operates within threads. One end of the thread
is UDMAP, the other is on the peripheral side.

The UDMAP channel configuration depends on the needs of the remote endpoint
and it can differ from peripheral to peripheral.

This patch adds a database for am654 and j721e and a small API to fetch the
PSI-L endpoint configuration from the database, which should only be used by
the DMA driver(s).

Another API is added for native peripherals to give them the possibility to
pass a new configuration for the threads they are using, which is needed to
be able to handle changes caused by, for example, different firmware loaded
for the peripheral.

Signed-off-by: Peter Ujfalusi
Tested-by: Keerthy
Reviewed-by: Grygorii Strashko
Link: https://lore.kernel.org/r/20191223110458.30766-9-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 include/linux/dma/k3-psil.h | 71 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)
 create mode 100644 include/linux/dma/k3-psil.h

(limited to 'include/linux')

diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
new file mode 100644
index 000000000000..61d5cc0ad601
--- /dev/null
+++ b/include/linux/dma/k3-psil.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL: Normal channel
+ * @UDMA_TP_HIGH: High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ */
+enum udma_tp_level {
+	UDMA_TP_NORMAL = 0,
+	UDMA_TP_HIGH,
+	UDMA_TP_ULTRAHIGH,
+	UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE: Normal channel
+ * @PSIL_EP_PDMA_XY: XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+	PSIL_EP_NATIVE = 0,
+	PSIL_EP_PDMA_XY,
+	PSIL_EP_PDMA_MCAN,
+	PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type: PSI-L endpoint type
+ * @pkt_mode: If set, the channel must be in Packet mode, otherwise in
+ *	      TR mode
+ * @notdpkt: TDCM must be suppressed on the TX channel
+ * @needs_epib: Endpoint needs EPIB
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @channel_tpl: Desired throughput level for the channel
+ * @pdma_acc32: ACC32 must be enabled on the PDMA side
+ * @pdma_burst: BURST must be enabled on the PDMA side
+ */
+struct psil_endpoint_config {
+	enum psil_endpoint_type ep_type;
+
+	unsigned pkt_mode:1;
+	unsigned notdpkt:1;
+	unsigned needs_epib:1;
+	u32 psd_size;
+	enum udma_tp_level channel_tpl;
+
+	/* PDMA properties, valid for PSIL_EP_PDMA_* */
+	unsigned pdma_acc32:1;
+	unsigned pdma_burst:1;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+			   struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
-- cgit
From d702419134133db1eab2067dc6ea5723467fd917 Mon Sep 17 00:00:00 2001
From: Grygorii Strashko
Date: Mon, 23 Dec 2019 13:04:51 +0200
Subject: dmaengine: ti: k3-udma: Add glue layer for non DMAengine users

Certain users cannot use the DMAengine API right now due to missing features
in the core. A prime example is networking.

These users can use the glue layer interface to avoid misuse of the DMAengine
API and, when the core gains the needed features, they can be converted to
use the generic API.

The most prominent features the glue layer clients are depending on:
- most PSI-L native peripherals use extra rflow ranges on a receive channel
  and, depending on the peripheral's configuration, packets from a single
  free descriptor ring are going to be received into different receive rings
- it is also possible to have different free descriptor rings per rflow and
  an rflow can also support 4 additional free descriptor rings based on the
  size of the incoming packet
- out of order completion of descriptors on a channel
- when we have several queues to handle different priority packets the
  descriptors will be completed 'out-of-order'
- the notion of prep_slave_sg does not match what the streaming type of
  operation demands for networking
- Streaming type of operation
- Ability to fill the free descriptor ring with descriptors in anticipation
  of incoming traffic and when a packet arrives UDMAP will form a packet and
  give it to the client driver
- the descriptors are not backed with exact size data buffers as we don't
  know the size of the packet we will receive, but as a generic pool of
  buffers to be used by the receive channel
- NAPI type of operation (polling instead of interrupt driven transfer)
- without this we cannot sustain gigabit speeds and we need to support NAPI
- not to limit this to networking, but to other high performance operations

Signed-off-by: Grygorii Strashko
Signed-off-by: Peter Ujfalusi
Tested-by: Keerthy
Link: https://lore.kernel.org/r/20191223110458.30766-12-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 include/linux/dma/k3-udma-glue.h | 134 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)
 create mode 100644 include/linux/dma/k3-udma-glue.h

(limited to 'include/linux')

diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
new file mode 100644
index 000000000000..caadbab1632a
--- /dev/null
+++ b/include/linux/dma/k3-udma-glue.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include
+#include
+#include
+
+struct k3_udma_glue_tx_channel_cfg {
+	struct k3_ring_cfg tx_cfg;
+	struct k3_ring_cfg txcq_cfg;
+
+	bool tx_pause_on_err;
+	bool tx_filt_einfo;
+	bool tx_filt_pswords;
+	bool tx_supr_tdpkt;
+	u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+			     struct cppi5_host_desc_t *desc_tx,
+			     dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+			    dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+			       bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+			       void *data,
+			       void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+
+enum {
+	K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+	K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+	K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+	K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg: RX ring configuration
+ * @rxfdq_cfg: RX free Host PD ring configuration
+ * @ring_rxq_id: RX ring id (or -1 for any)
+ * @ring_rxfdq0_id: RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel: Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+	struct k3_ring_cfg rx_cfg;
+	struct k3_ring_cfg rxfdq_cfg;
+	int ring_rxq_id;
+	int ring_rxfdq0_id;
+	bool rx_error_handling;
+	int src_tag_lo_sel;
+};
+
+/**
+ * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size: SW Data is present in Host PD of @swdata_size bytes
+ * @flow_id_base: first flow_id used by channel.
+ *		  if @flow_id_base = -1, a range of GP rflows will be
+ *		  allocated dynamically.
+ * @flow_id_num: number of RX flows used by channel
+ * @flow_id_use_rxchan_id: use RX channel id as flow id,
+ *			   used only if @flow_id_num = 1
+ * @remote: indication that RX channel is remote - some remote CPU
+ *	    core owns and controls the RX channel. The Linux host is only
+ *	    allowed to attach and configure RX flows within the RX
+ *	    channel. If set, no RX channel operations will be
+ *	    performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg: default RX flow configuration,
+ *		  used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+	u32 swdata_size;
+	int flow_id_base;
+	int flow_id_num;
+	bool flow_id_use_rxchan_id;
+	bool remote;
+
+	struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+		struct device *dev,
+		const char *name,
+		struct k3_udma_glue_rx_channel_cfg *cfg);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			       bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			     u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+			     dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			    u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+			      u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+				    u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+			    u32 flow_num);
+void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
+			     u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			       u32 flow_num, void *data,
+			       void (*cleanup)(void *data, dma_addr_t desc_dma),
+			       bool skip_fdq);
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+				u32 flow_idx);
+int
k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx); + +#endif /* K3_UDMA_GLUE_H_ */ -- cgit From 71ca5b78235e79c36f774773491064d7921d1942 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 21 Jan 2020 10:33:10 +0100 Subject: dmaengine: Remove dma_request_slave_channel_compat() wrapper At its original introduction, dma_request_slave_channel_compat() used a wrapper, to accommodate filter functions that modify the mask passed. Filter functions can no longer modify masks, and the mask parameter was made const in commit a53e28da574a40bc ("dma: Make the 'mask' parameter of __dma_request_channel const") consecutively. Hence remove the wrapper, and rename __dma_request_slave_channel_compat() to dma_request_slave_channel_compat(), to get rid of one more function name starting with a double underscore. Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200121093311.28639-3-geert+renesas@glider.be Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 62225d46908b..230d50ef7360 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1526,11 +1526,9 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -#define dma_request_slave_channel_compat(mask, x, y, dev, name) \ - __dma_request_slave_channel_compat(&(mask), x, y, dev, name) static inline struct dma_chan -*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, +*dma_request_slave_channel_compat(const dma_cap_mask_t mask, dma_filter_fn fn, void *fn_param, struct device *dev, const char *name) { @@ -1543,7 +1541,7 @@ static inline struct dma_chan if (!fn || !fn_param) return NULL; - return __dma_request_channel(mask, fn, fn_param, NULL); + return __dma_request_channel(&mask, fn, fn_param, NULL); } static inline char * -- cgit From c3c431de99c068e3f64d01335c1532b22e4b1d1b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 21 Jan 2020 10:33:11 +0100 Subject: dmaengine: Move dma_get_{,any_}slave_channel() to private dmaengine.h The functions dma_get_slave_channel() and dma_get_any_slave_channel() are called from DMA engine drivers only. Hence move their declarations from the public header file to the private header file drivers/dma/dmaengine.h. 
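For illustration only (not part of this patch): a minimal sketch of the kind of in-driver caller these declarations now serve after the move, assuming a hypothetical foo DMA controller driver living under drivers/dma/ that hands out channels from its of_dma xlate callback; the foo_* names are hypothetical.

/* Sketch: a DMA engine driver picks up the now-private declaration of
 * dma_get_any_slave_channel() from drivers/dma/dmaengine.h.
 */
#include <linux/of_dma.h>
#include <linux/dmaengine.h>

#include "dmaengine.h"	/* private header, declares dma_get_any_slave_channel() */

static struct dma_chan *foo_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct dma_device *dma_dev = ofdma->of_dma_data;

	/* Hand any free channel of this controller to the requesting client. */
	return dma_get_any_slave_channel(dma_dev);
}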
Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200121093311.28639-4-geert+renesas@glider.be Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 230d50ef7360..9cc0e70e7c35 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1522,8 +1522,6 @@ int dma_async_device_register(struct dma_device *device); int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); -struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); -struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -- cgit From e81274cd6b5264809384066e09a5253708822522 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 21 Jan 2020 16:43:53 -0700 Subject: dmaengine: add support to dynamic register/unregister of channels With the channel registration routines broken out, now add support code to allow independent registering and unregistering of channels in a hotplug fashion. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/157965023364.73301.7821862091077299040.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 9cc0e70e7c35..f52f274773ed 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1521,6 +1521,10 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) int dma_async_device_register(struct dma_device *device); int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan); +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -- cgit From 71723a96b8b1367fefc18f60025dae792477d602 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 17 Jan 2020 16:30:56 +0100 Subject: dmaengine: Create symlinks between DMA channels and slaves MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently it is not easy to find out which DMA channels are in use, and which slave devices are using which channels. Fix this by creating two symlinks between the DMA channel and the actual slave device when a channel is requested: 1. A "slave" symlink from DMA channel to slave device, 2. A "dma:" symlink slave device to DMA channel. When the channel is released, the symlinks are removed again. The latter requires keeping track of the slave device and the channel name in the dma_chan structure. Note that this is limited to channel request functions for requesting an exclusive slave channel that take a device pointer (dma_request_chan() and dma_request_slave_channel*()). 
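For illustration only (not part of this patch): a minimal sketch of a slave driver request that would create, and on release remove, the two symlinks described above; the foo driver, the "tx" channel name, and the exact sysfs paths are hypothetical and depend on runtime channel numbering.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	/* While the channel is held, a "slave" link appears under the
	 * channel's sysfs node and a "dma:tx" link under &pdev->dev. */
	chan = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... set up and use the channel ... */

	dma_release_channel(chan);	/* both symlinks are removed again */
	return 0;
}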
Signed-off-by: Geert Uytterhoeven Tested-by: Niklas Söderlund Link: https://lore.kernel.org/r/20200117153056.31363-1-geert+renesas@glider.be Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index f52f274773ed..fef69a9c5824 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -294,10 +294,12 @@ struct dma_router { /** * struct dma_chan - devices supply DMA channels, clients use them * @device: ptr to the dma device who supplies this channel, always !%NULL + * @slave: ptr to the device using this channel * @cookie: last cookie value returned to client * @completed_cookie: last completed cookie for this channel * @chan_id: channel ID for sysfs * @dev: class device for sysfs + * @name: backlink name for sysfs * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel @@ -308,12 +310,14 @@ struct dma_router { */ struct dma_chan { struct dma_device *device; + struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; /* sysfs */ int chan_id; struct dma_chan_dev *dev; + const char *name; struct list_head device_node; struct dma_chan_percpu __percpu *local; -- cgit
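For illustration only (not part of the patches above): a minimal sketch of how a provider might use the dma_async_device_channel_register() interface introduced earlier in this series to hot-add a channel after dma_async_device_register() has already run; the foo_* names are hypothetical and provider-specific setup (such as linking the channel into the device's channel list) is omitted.

#include <linux/dmaengine.h>
#include <linux/slab.h>

struct foo_chan {
	struct dma_chan chan;	/* must stay valid while registered */
	/* driver-private per-channel state ... */
};

static int foo_add_channel(struct dma_device *dma_dev)
{
	struct foo_chan *fc;
	int ret;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fc->chan.device = dma_dev;

	/* Make the new channel visible to clients and sysfs at runtime. */
	ret = dma_async_device_channel_register(dma_dev, &fc->chan);
	if (ret)
		kfree(fc);

	return ret;
}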