-rw-r--r--  Documentation/devicetree/bindings/net/cdns,macb.yaml |   2
-rw-r--r--  drivers/net/ethernet/cadence/macb.h                  |   4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c             | 138
3 files changed, 69 insertions(+), 75 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 559d0f733e7e..6e20d67e7628 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -85,7 +85,7 @@ properties:
     items:
       - enum: [ ether_clk, hclk, pclk ]
       - enum: [ hclk, pclk ]
-      - const: tx_clk
+      - enum: [ tx_clk, tsu_clk ]
       - enum: [ rx_clk, tsu_clk ]
       - const: tsu_clk
 
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c9a5c8beb2fa..a7e845fee4b3 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -213,10 +213,8 @@
 #define GEM_ISR(hw_q)		(0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q)		(0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q)		(0x04C8)
 #define GEM_RBQP(hw_q)		(0x0480 + ((hw_q) << 2))
 #define GEM_RBQS(hw_q)		(0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q)		(0x04D4)
 #define GEM_IER(hw_q)		(0x0600 + ((hw_q) << 2))
 #define GEM_IDR(hw_q)		(0x0620 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)		(0x0640 + ((hw_q) << 2))
@@ -1214,10 +1212,8 @@ struct macb_queue {
 	unsigned int		IDR;
 	unsigned int		IMR;
 	unsigned int		TBQP;
-	unsigned int		TBQPH;
 	unsigned int		RBQS;
 	unsigned int		RBQP;
-	unsigned int		RBQPH;
 
 	/* Lock to protect tx_head and tx_tail */
 	spinlock_t		tx_ptr_lock;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c769b7dbd3ba..4af2ec705ba5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -51,14 +51,10 @@ struct sifive_fu540_macb_mgmt {
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
-				 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
-				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
@@ -278,9 +274,9 @@ static void macb_set_hwaddr(struct macb *bp)
 	u32 bottom;
 	u16 top;
 
-	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+	bottom = get_unaligned_le32(bp->dev->dev_addr);
 	macb_or_gem_writel(bp, SA1B, bottom);
-	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+	top = get_unaligned_le16(bp->dev->dev_addr + 4);
 	macb_or_gem_writel(bp, SA1T, top);
 
 	if (gem_has_ptp(bp)) {
@@ -495,19 +491,19 @@ static void macb_init_buffers(struct macb *bp)
 	struct macb_queue *queue;
 	unsigned int q;
 
-	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, RBQPH,
-				     upper_32_bits(queue->rx_ring_dma));
+	/* Single register for all queues' high 32 bits. */
+	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+		macb_writel(bp, RBQPH,
+			    upper_32_bits(bp->queues[0].rx_ring_dma));
+		macb_writel(bp, TBQPH,
+			    upper_32_bits(bp->queues[0].tx_ring_dma));
+	}
 #endif
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, TBQPH,
-				     upper_32_bits(queue->tx_ring_dma));
-#endif
 	}
 }
 
@@ -1166,10 +1162,6 @@ static void macb_tx_error_task(struct work_struct *work)
 
 	/* Reinitialize the TX desc queue */
 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
 	queue->tx_tail = 0;
@@ -2474,35 +2466,42 @@ static void macb_free_rx_buffers(struct macb *bp)
 	}
 }
 
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+	return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+	return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
 static void macb_free_consistent(struct macb *bp)
 {
+	struct device *dev = &bp->pdev->dev;
 	struct macb_queue *queue;
 	unsigned int q;
-	int size;
+	size_t size;
 
 	if (bp->rx_ring_tieoff) {
-		dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+		dma_free_coherent(dev, macb_dma_desc_get_size(bp),
 				  bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
 		bp->rx_ring_tieoff = NULL;
 	}
 
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 
+	size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+	dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+	size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+	dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
-		if (queue->tx_ring) {
-			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
-			dma_free_coherent(&bp->pdev->dev, size,
-					  queue->tx_ring, queue->tx_ring_dma);
-			queue->tx_ring = NULL;
-		}
-		if (queue->rx_ring) {
-			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
-			dma_free_coherent(&bp->pdev->dev, size,
-					  queue->rx_ring, queue->rx_ring_dma);
-			queue->rx_ring = NULL;
-		}
+		queue->tx_ring = NULL;
+		queue->rx_ring = NULL;
 	}
 }
 
@@ -2544,35 +2543,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 
 static int macb_alloc_consistent(struct macb *bp)
 {
+	struct device *dev = &bp->pdev->dev;
+	dma_addr_t tx_dma, rx_dma;
 	struct macb_queue *queue;
 	unsigned int q;
-	int size;
+	void *tx, *rx;
+	size_t size;
+
+	/*
+	 * The upper 32 bits of the Tx/Rx DMA descriptor addresses must match
+	 * across all queues! We cannot enforce this guarantee; the best we
+	 * can do is a single allocation and hope it lands in alloc_pages(),
+	 * which guarantees natural alignment of physical addresses.
+	 */
+
+	size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+	tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+	if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+		goto out_err;
+	netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+		   size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+	size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+	rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+	if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+		goto out_err;
+	netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+		   size, bp->num_queues, (unsigned long)rx_dma, rx);
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
-		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						    &queue->tx_ring_dma,
-						    GFP_KERNEL);
-		if (!queue->tx_ring)
-			goto out_err;
-		netdev_dbg(bp->dev,
-			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
-			   q, size, (unsigned long)queue->tx_ring_dma,
-			   queue->tx_ring);
+		queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+		queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+		queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+		queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
 
 		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
-
-		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
-		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						    &queue->rx_ring_dma, GFP_KERNEL);
-		if (!queue->rx_ring)
-			goto out_err;
-		netdev_dbg(bp->dev,
-			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
-			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
 	}
 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 		goto out_err;
@@ -4309,12 +4318,6 @@ static int macb_init(struct platform_device *pdev)
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 			queue->RBQP = GEM_RBQP(hw_q - 1);
 			queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
-				queue->TBQPH = GEM_TBQPH(hw_q - 1);
-				queue->RBQPH = GEM_RBQPH(hw_q - 1);
-			}
-#endif
 		} else {
 			/* queue0 uses legacy registers */
 			queue->ISR = MACB_ISR;
@@ -4323,12 +4326,6 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 			queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
-				queue->TBQPH = MACB_TBQPH;
-				queue->RBQPH = MACB_RBQPH;
-			}
-#endif
 		}
 
 		/* get irq: here we use the linux queue index, not the hardware
@@ -5452,6 +5449,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
 		 */
 		tmp = macb_readl(bp, NCR);
 		macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+			macb_writel(bp, RBQPH,
+				    upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
 		for (q = 0, queue = bp->queues; q < bp->num_queues;
 		     ++q, ++queue) {
 			/* Disable RX queues */
@@ -5461,10 +5463,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
 			/* Tie off RX queues */
 			queue_writel(queue, RBQP,
 				     lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue_writel(queue, RBQPH,
-				     upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
 		}
 		/* Disable all interrupts */
 		queue_writel(queue, IDR, -1);