author    Linus Torvalds <torvalds@linux-foundation.org>  2023-11-01 13:15:54 -1000
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-11-01 13:15:54 -1000
commit    009fbfc97b6367762efa257f1478ec86d37949f9 (patch)
tree      99842c2e12ac6c39e1636a6256bb96e3335cecb9 /drivers/net/ethernet/freescale
parent    3c86a44d623ee8734a02636d3544812e31496460 (diff)
parent    36d91e851598a9ea523ad4681dd11fa661d59695 (diff)
Merge tag 'dma-mapping-6.7-2023-10-30' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - get rid of the fake support for coherent DMA allocation on coldfire
   with caches (Christoph Hellwig)

 - add a few Kconfig dependencies so that Kconfig catches the use of
   invalid configurations (Christoph Hellwig)

 - fix a typo in dma-debug output (Chuck Lever)

 - rewrite a comment in swiotlb (Sean Christopherson)

* tag 'dma-mapping-6.7-2023-10-30' of git://git.infradead.org/users/hch/dma-mapping:
  dma-debug: Fix a typo in a debugging eye-catcher
  swiotlb: rewrite comment explaining why the source is preserved on DMA_FROM_DEVICE
  m68k: remove unused includes from dma.c
  m68k: don't provide arch_dma_alloc for nommu/coldfire
  net: fec: use dma_alloc_noncoherent for data cache enabled coldfire
  m68k: use the coherent DMA code for coldfire without data cache
  dma-direct: warn when coherent allocations aren't supported
  dma-direct: simplify the use atomic pool logic in dma_direct_alloc
  dma-direct: add a CONFIG_ARCH_HAS_DMA_ALLOC symbol
  dma-direct: add dependencies to CONFIG_DMA_GLOBAL_POOL
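The "net: fec: use dma_alloc_noncoherent for data cache enabled coldfire" change relies on the kernel's non-coherent allocation API, where the driver owns cache maintenance. A minimal sketch of that pattern, assuming a made-up device and buffer (my_setup(), my_rx_poll() and their names are illustrative, not code from this series):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void *my_buf;
static dma_addr_t my_handle;

/* Allocate a buffer the device can DMA to/from; the CPU mapping is not
 * coherent, so every CPU access must be bracketed by explicit syncs.
 */
static int my_setup(struct device *dev, size_t size)
{
	my_buf = dma_alloc_noncoherent(dev, size, &my_handle,
				       DMA_BIDIRECTIONAL, GFP_KERNEL);
	return my_buf ? 0 : -ENOMEM;
}

static void my_rx_poll(struct device *dev, size_t size)
{
	/* Hand ownership to the CPU before reading device-written data. */
	dma_sync_single_for_cpu(dev, my_handle, size, DMA_BIDIRECTIONAL);
	/* ... parse my_buf ... */
	/* Hand ownership back before the device touches the buffer again. */
	dma_sync_single_for_device(dev, my_handle, size, DMA_BIDIRECTIONAL);
}

This is the discipline the fec_dma_alloc()/fec_dma_free() wrappers in the diff below opt into on cache-enabled coldfire.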
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 86
1 file changed, 76 insertions, 10 deletions
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 032c15b541ff..c3b7694a7485 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -363,6 +363,70 @@ static void fec_dump(struct net_device *ndev)
} while (bdp != txq->bd.base);
}
+/*
+ * Coldfire does not support DMA coherent allocations, and has historically used
+ * a band-aid with a manual flush in fec_enet_rx_queue.
+ */
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
+{
+ return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
+}
+
+static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle)
+{
+ dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
+}
+#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
+static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
+{
+ return dma_alloc_coherent(dev, size, handle, gfp);
+}
+
+static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle)
+{
+ dma_free_coherent(dev, size, cpu_addr, handle);
+}
+#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
+
+struct fec_dma_devres {
+ size_t size;
+ void *vaddr;
+ dma_addr_t dma_handle;
+};
+
+static void fec_dmam_release(struct device *dev, void *res)
+{
+ struct fec_dma_devres *this = res;
+
+ fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
+}
+
+static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
+{
+ struct fec_dma_devres *dr;
+ void *vaddr;
+
+ dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
+ if (!dr)
+ return NULL;
+ vaddr = fec_dma_alloc(dev, size, handle, gfp);
+ if (!vaddr) {
+ devres_free(dr);
+ return NULL;
+ }
+ dr->vaddr = vaddr;
+ dr->dma_handle = *handle;
+ dr->size = size;
+ devres_add(dev, dr);
+ return vaddr;
+}
+
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
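The fec_dmam_alloc() helper above reimplements the managed ("devres") allocation idiom on top of the wrappers, mirroring what dmam_alloc_coherent() does for plain coherent memory: the devres entry ties the buffer's lifetime to the device. A hedged sketch of what that buys a caller (my_probe_example() is hypothetical, not part of the patch):

/* Illustrative only: the devres entry registered by fec_dmam_alloc()
 * causes fec_dmam_release() to run when the device is unbound, so no
 * explicit free is needed in the error or remove paths.
 */
static int my_probe_example(struct device *dev)
{
	dma_addr_t handle;
	void *ring;

	ring = fec_dmam_alloc(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program the device with handle; no matching free call ... */
	return 0;
}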
@@ -1617,7 +1681,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
#endif
-#ifdef CONFIG_M532x
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
flush_cache_all();
#endif
rxq = fep->rx_queue[queue_id];
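The "hacky flush" comment in the hunk above contrasts flush_cache_all() with doing targeted DMA API maintenance on just the TSO header ring. A hedged sketch of that alternative (my_sync_tso_hdrs() is hypothetical; the fields and direction match the fec_dma_alloc() mapping elsewhere in this diff):

/* Hypothetical replacement for the blanket flush: sync only the TSO
 * header ring before the device reads it, using the same
 * DMA_BIDIRECTIONAL direction the buffer was mapped with.
 */
static void my_sync_tso_hdrs(struct fec_enet_private *fep,
			     struct fec_enet_priv_tx_q *txq)
{
	dma_sync_single_for_device(&fep->pdev->dev, txq->tso_hdrs_dma,
				   txq->bd.ring_size * TSO_HEADER_SIZE,
				   DMA_BIDIRECTIONAL);
}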
@@ -3243,10 +3311,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
for (i = 0; i < fep->num_tx_queues; i++)
if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
txq = fep->tx_queue[i];
- dma_free_coherent(&fep->pdev->dev,
- txq->bd.ring_size * TSO_HEADER_SIZE,
- txq->tso_hdrs,
- txq->tso_hdrs_dma);
+ fec_dma_free(&fep->pdev->dev,
+ txq->bd.ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_dma);
}
for (i = 0; i < fep->num_rx_queues; i++)
@@ -3276,10 +3343,9 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
- txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
+ txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
txq->bd.ring_size * TSO_HEADER_SIZE,
- &txq->tso_hdrs_dma,
- GFP_KERNEL);
+ &txq->tso_hdrs_dma, GFP_KERNEL);
if (!txq->tso_hdrs) {
ret = -ENOMEM;
goto alloc_failed;
@@ -3998,8 +4064,8 @@ static int fec_enet_init(struct net_device *ndev)
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
/* Allocate memory for buffer descriptors. */
- cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
- GFP_KERNEL);
+ cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
+ GFP_KERNEL);
if (!cbd_base) {
ret = -ENOMEM;
goto free_queue_mem;