From 4780b7cae60cf10af4ae75bc5d6643f41d4c2969 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 4 Nov 2014 16:06:26 -0600
Subject: amd-xgbe: Move ring allocation to device open

Move the channel and ring tracking structures allocation to device
open. This will allow for future support to vary the number of Tx/Rx
queues without unloading the module.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 93 ++++++++++++++++++++++++++++++--
 1 file changed, 89 insertions(+), 4 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2349ea970255..07b00bdcd9f9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -129,6 +129,80 @@ static int xgbe_poll(struct napi_struct *, int);
 static void xgbe_set_rx_mode(struct net_device *);
 
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel_mem, *channel;
+	struct xgbe_ring *tx_ring, *rx_ring;
+	unsigned int count, i;
+
+	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+	if (!channel_mem)
+		goto err_channel;
+
+	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!tx_ring)
+		goto err_tx_ring;
+
+	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+			  GFP_KERNEL);
+	if (!rx_ring)
+		goto err_rx_ring;
+
+	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+		channel->pdata = pdata;
+		channel->queue_index = i;
+		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+				    (DMA_CH_INC * i);
+
+		if (i < pdata->tx_ring_count) {
+			spin_lock_init(&tx_ring->lock);
+			channel->tx_ring = tx_ring++;
+		}
+
+		if (i < pdata->rx_ring_count) {
+			spin_lock_init(&rx_ring->lock);
+			channel->rx_ring = rx_ring++;
+		}
+
+		DBGPR("  %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
+		      channel->name, channel->queue_index, channel->dma_regs,
+		      channel->tx_ring, channel->rx_ring);
+	}
+
+	pdata->channel = channel_mem;
+	pdata->channel_count = count;
+
+	return 0;
+
+err_rx_ring:
+	kfree(tx_ring);
+
+err_tx_ring:
+	kfree(channel_mem);
+
+err_channel:
+	netdev_err(pdata->netdev, "channel allocation failed\n");
+
+	return -ENOMEM;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->channel)
+		return;
+
+	kfree(pdata->channel->rx_ring);
+	kfree(pdata->channel->tx_ring);
+	kfree(pdata->channel);
+
+	pdata->channel = NULL;
+	pdata->channel_count = 0;
+}
+
 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
 {
 	return (ring->rdesc_count - (ring->cur - ring->dirty));
@@ -1119,10 +1193,15 @@ static int xgbe_open(struct net_device *netdev)
 		goto err_ptpclk;
 	pdata->rx_buf_size = ret;
 
+	/* Allocate the channel and ring structures */
+	ret = xgbe_alloc_channels(pdata);
+	if (ret)
+		goto err_ptpclk;
+
 	/* Allocate the ring descriptors and buffers */
 	ret = desc_if->alloc_ring_resources(pdata);
 	if (ret)
-		goto err_ptpclk;
+		goto err_channels;
 
 	/* Initialize the device restart and Tx timestamp work struct */
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
@@ -1134,7 +1213,7 @@ static int xgbe_open(struct net_device *netdev)
 	if (ret) {
 		netdev_alert(netdev, "error requesting irq %d\n",
 			     pdata->irq_number);
-		goto err_irq;
+		goto err_rings;
 	}
 	pdata->irq_number = netdev->irq;
 
@@ -1152,9 +1231,12 @@ err_start:
 	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
 	pdata->irq_number = 0;
 
-err_irq:
+err_rings:
 	desc_if->free_ring_resources(pdata);
 
+err_channels:
+	xgbe_free_channels(pdata);
+
 err_ptpclk:
 	clk_disable_unprepare(pdata->ptpclk);
 
@@ -1181,9 +1263,12 @@ static int xgbe_close(struct net_device *netdev)
 	/* Issue software reset to device */
 	hw_if->exit(pdata);
 
-	/* Free all the ring data */
+	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
+	/* Free the channel and ring structures */
+	xgbe_free_channels(pdata);
+
 	/* Release the interrupt */
 	if (pdata->irq_number != 0) {
 		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
--
cgit

From a9d41981e95651143125352f0233138efc17378a Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 4 Nov 2014 16:06:32 -0600
Subject: amd-xgbe: Rename pre_xmit function to dev_xmit

The pre_xmit function name implies that it performs operations prior
to transmitting the packet when in fact it is responsible for setting
up the descriptors and initiating the transmit. Rename this function
from pre_xmit to dev_xmit, which is consistent with the name used
during receive processing - dev_read.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 07b00bdcd9f9..8cb2372f8fa9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1343,7 +1343,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 	xgbe_prep_tx_tstamp(pdata, skb, packet);
 
 	/* Configure required descriptor fields for transmission */
-	hw_if->pre_xmit(channel);
+	hw_if->dev_xmit(channel);
 
 #ifdef XGMAC_ENABLE_TX_PKT_DUMP
 	xgbe_print_pkt(netdev, skb, true);
--
cgit
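The unwind in xgbe_alloc_channels() above follows the kernel's goto-based
error-path idiom: one label per allocation, each label freeing only what
was successfully allocated before it. A minimal standalone sketch of that
idiom (plain C with toy buffer names and sizes, not driver code):

	#include <stdlib.h>

	struct rings { void *channels, *tx, *rx; };

	/* Allocate three dependent buffers; on failure, fall through the
	 * labels in reverse allocation order so every failure path is a
	 * single goto and nothing is freed twice. */
	static int rings_alloc(struct rings *r, size_t n)
	{
		r->channels = calloc(n, 64);
		if (!r->channels)
			goto err_channels;

		r->tx = calloc(n, 128);
		if (!r->tx)
			goto err_tx;

		r->rx = calloc(n, 128);
		if (!r->rx)
			goto err_rx;

		return 0;

	err_rx:
		free(r->tx);
	err_tx:
		free(r->channels);
	err_channels:
		return -1;	/* the driver returns -ENOMEM here */
	}

Because the labels nest in reverse order, adding a fourth allocation only
requires one new label above the existing ones, which is exactly how the
later per-channel-IRQ patch extends this function with err_irq.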
From 08dcc47c06c79de31b9b2c0b4637f6119e5701fa Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 4 Nov 2014 16:06:44 -0600
Subject: amd-xgbe: Use page allocations for Rx buffers

Use page allocations for Rx buffers instead of pre-allocating skbs
of a set size.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 95 ++++++++++++++++++++------------
 1 file changed, 61 insertions(+), 34 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 8cb2372f8fa9..d65f5aa8fdce 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -218,8 +218,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 	}
 
 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
-		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
 		      ~(XGBE_RX_BUF_ALIGN - 1);
 
@@ -546,7 +546,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_init_rx_coalesce\n");
 }
 
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -554,7 +554,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_tx_skbuff\n");
+	DBGPR("-->xgbe_free_tx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -564,14 +564,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_tx_skbuff\n");
+	DBGPR("<--xgbe_free_tx_data\n");
 }
 
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -579,7 +579,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_rx_skbuff\n");
+	DBGPR("-->xgbe_free_rx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -589,11 +589,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_rx_skbuff\n");
+	DBGPR("<--xgbe_free_rx_data\n");
 }
 
 static void xgbe_adjust_link(struct net_device *netdev)
@@ -839,8 +839,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 	xgbe_stop(pdata);
 	synchronize_irq(pdata->irq_number);
 
-	xgbe_free_tx_skbuff(pdata);
-	xgbe_free_rx_skbuff(pdata);
+	xgbe_free_tx_data(pdata);
+	xgbe_free_rx_data(pdata);
 
 	/* Issue software reset to device if requested */
 	if (reset)
@@ -1609,7 +1609,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 
-	desc_if->realloc_skb(channel);
+	desc_if->realloc_rx_buffer(channel);
 
 	/* Update the Rx Tail Pointer Register with address of
 	 * the last cleaned entry */
@@ -1618,6 +1618,37 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct xgbe_ring_data *rdata,
+				       unsigned int len)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct sk_buff *skb;
+	u8 *packet;
+	unsigned int copy_len;
+
+	skb = netdev_alloc_skb_ip_align(netdev, XGBE_SKB_ALLOC_SIZE);
+	if (!skb)
+		return NULL;
+
+	packet = page_address(rdata->rx_pa.pages) + rdata->rx_pa.pages_offset;
+	copy_len = min_t(unsigned int, XGBE_SKB_ALLOC_SIZE, len);
+	skb_copy_to_linear_data(skb, packet, copy_len);
+	skb_put(skb, copy_len);
+
+	rdata->rx_pa.pages_offset += copy_len;
+	len -= copy_len;
+	if (len)
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				rdata->rx_pa.pages,
+				rdata->rx_pa.pages_offset,
+				len, rdata->rx_dma_len);
+	else
+		put_page(rdata->rx_pa.pages);
+
+	return skb;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1651,7 +1682,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 #endif
 
 		/* Free the SKB and reset the descriptor for re-use */
-		desc_if->unmap_skb(pdata, rdata);
+		desc_if->unmap_rdata(pdata, rdata);
 		hw_if->tx_desc_reset(rdata);
 
 		processed++;
@@ -1726,9 +1757,9 @@ read_again:
 		ring->cur++;
 		ring->dirty++;
 
-		dma_unmap_single(pdata->dev, rdata->skb_dma,
-				 rdata->skb_dma_len, DMA_FROM_DEVICE);
-		rdata->skb_dma = 0;
+		dma_sync_single_for_cpu(pdata->dev, rdata->rx_dma,
+					rdata->rx_dma_len,
+					DMA_FROM_DEVICE);
 
 		incomplete = XGMAC_GET_BITS(packet->attributes,
 					    RX_PACKET_ATTRIBUTES,
@@ -1753,26 +1784,22 @@ read_again:
 		if (!context) {
 			put_len = rdata->len - len;
-			if (skb) {
-				if (pskb_expand_head(skb, 0, put_len,
-						     GFP_ATOMIC)) {
-					DBGPR("pskb_expand_head error\n");
-					if (incomplete) {
-						error = 1;
-						goto read_again;
-					}
-
-					dev_kfree_skb(skb);
-					goto next_packet;
+			len += put_len;
+
+			if (!skb) {
+				skb = xgbe_create_skb(pdata, rdata, put_len);
+				if (!skb) {
+					error = 1;
+					goto read_again;
 				}
-				memcpy(skb_tail_pointer(skb), rdata->skb->data,
-				       put_len);
 			} else {
-				skb = rdata->skb;
-				rdata->skb = NULL;
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						rdata->rx_pa.pages,
+						rdata->rx_pa.pages_offset,
+						put_len, rdata->rx_dma_len);
 			}
-			skb_put(skb, put_len);
-			len += put_len;
+
+			rdata->rx_pa.pages = NULL;
 		}
 
 		if (incomplete || context_next)
--
cgit
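The new buffer-size computation clamps the MTU-derived size between the
hardware minimum and one page, then rounds up to the alignment boundary
with a power-of-two mask. A runnable illustration of just that arithmetic
(plain C; the constants are made-up stand-ins for the driver's macros):

	#include <stdio.h>

	#define RX_MIN_BUF_SIZE	1522u	/* stand-in for XGBE_RX_MIN_BUF_SIZE */
	#define RX_BUF_ALIGN	64u	/* stand-in for XGBE_RX_BUF_ALIGN */
	#define PAGE_SZ		4096u	/* stand-in for PAGE_SIZE */

	static unsigned int clamp_val(unsigned int v, unsigned int lo,
				      unsigned int hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	int main(void)
	{
		unsigned int mtu = 1500, rx_buf_size;

		rx_buf_size = mtu + 18 + 4;	/* Ethernet hdr+FCS, VLAN tag */
		rx_buf_size = clamp_val(rx_buf_size, RX_MIN_BUF_SIZE, PAGE_SZ);
		/* Round up to the next multiple of RX_BUF_ALIGN */
		rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) &
			      ~(RX_BUF_ALIGN - 1);

		printf("rx_buf_size = %u\n", rx_buf_size);	/* prints 1536 */
		return 0;
	}

The mask trick only works when the alignment is a power of two, which is
why drivers define such constants that way.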
From 174fd2597b0bd8c19fce6a97e8b0f753ef4ce7cb Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 4 Nov 2014 16:06:50 -0600
Subject: amd-xgbe: Implement split header receive support

Provide support for splitting IP packets so that the header and
payload can be sent to different DMA addresses. This will allow the
IP header to be put into the linear part of the skb while the payload
can be added as frags.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 63 ++++++++++++++++----------------
 1 file changed, 32 insertions(+), 31 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d65f5aa8fdce..07e2d216323a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1620,31 +1620,25 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 
 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 				       struct xgbe_ring_data *rdata,
-				       unsigned int len)
+				       unsigned int *len)
 {
 	struct net_device *netdev = pdata->netdev;
 	struct sk_buff *skb;
 	u8 *packet;
 	unsigned int copy_len;
 
-	skb = netdev_alloc_skb_ip_align(netdev, XGBE_SKB_ALLOC_SIZE);
+	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	packet = page_address(rdata->rx_pa.pages) + rdata->rx_pa.pages_offset;
-	copy_len = min_t(unsigned int, XGBE_SKB_ALLOC_SIZE, len);
+	packet = page_address(rdata->rx_hdr.pa.pages) +
+		 rdata->rx_hdr.pa.pages_offset;
+	copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
+	copy_len = min(rdata->rx_hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
-	rdata->rx_pa.pages_offset += copy_len;
-	len -= copy_len;
-	if (len)
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				rdata->rx_pa.pages,
-				rdata->rx_pa.pages_offset,
-				len, rdata->rx_dma_len);
-	else
-		put_page(rdata->rx_pa.pages);
+	*len -= copy_len;
 
 	return skb;
 }
@@ -1757,10 +1751,6 @@ read_again:
 		ring->cur++;
 		ring->dirty++;
 
-		dma_sync_single_for_cpu(pdata->dev, rdata->rx_dma,
-					rdata->rx_dma_len,
-					DMA_FROM_DEVICE);
-
 		incomplete = XGMAC_GET_BITS(packet->attributes,
 					    RX_PACKET_ATTRIBUTES,
 					    INCOMPLETE);
@@ -1787,19 +1777,30 @@ read_again:
 			len += put_len;
 
 			if (!skb) {
-				skb = xgbe_create_skb(pdata, rdata, put_len);
+				dma_sync_single_for_cpu(pdata->dev,
+							rdata->rx_hdr.dma,
+							rdata->rx_hdr.dma_len,
+							DMA_FROM_DEVICE);
+
+				skb = xgbe_create_skb(pdata, rdata, &put_len);
 				if (!skb) {
 					error = 1;
 					goto read_again;
 				}
-			} else {
-				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-						rdata->rx_pa.pages,
-						rdata->rx_pa.pages_offset,
-						put_len, rdata->rx_dma_len);
 			}
 
-			rdata->rx_pa.pages = NULL;
+			if (put_len) {
+				dma_sync_single_for_cpu(pdata->dev,
+							rdata->rx_buf.dma,
+							rdata->rx_buf.dma_len,
+							DMA_FROM_DEVICE);
+
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						rdata->rx_buf.pa.pages,
+						rdata->rx_buf.pa.pages_offset,
+						put_len, rdata->rx_buf.dma_len);
+				rdata->rx_buf.pa.pages = NULL;
+			}
 		}
 
 		if (incomplete || context_next)
@@ -1924,10 +1925,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
 	while (count--) {
 		rdata = XGBE_GET_DESC_DATA(ring, idx);
 		rdesc = rdata->rdesc;
-		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
-		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
-		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
-		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+			 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
 		idx++;
 	}
 }
@@ -1935,9 +1936,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
 		       unsigned int idx)
 {
-	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
-	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
-	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
 }
 
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
--
cgit
failed\n"); - - return -ENOMEM; + return ret; } static void xgbe_free_channels(struct xgbe_prv_data *pdata) @@ -287,11 +304,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) if (!dma_isr) goto isr_done; - DBGPR("-->xgbe_isr\n"); - DBGPR(" DMA_ISR = %08x\n", dma_isr); - DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0)); - DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1)); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) @@ -302,6 +315,10 @@ static irqreturn_t xgbe_isr(int irq, void *data) dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); + /* If we get a TI or RI interrupt that means per channel DMA + * interrupts are not enabled, so we use the private data napi + * structure, not the per channel napi structure + */ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) { if (napi_schedule_prep(&pdata->napi)) { @@ -344,12 +361,28 @@ static irqreturn_t xgbe_isr(int irq, void *data) DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); - DBGPR("<--xgbe_isr\n"); - isr_done: return IRQ_HANDLED; } +static irqreturn_t xgbe_dma_isr(int irq, void *data) +{ + struct xgbe_channel *channel = data; + + /* Per channel DMA interrupts are enabled, so we use the per + * channel napi structure and not the private data napi structure + */ + if (napi_schedule_prep(&channel->napi)) { + /* Disable Tx and Rx interrupts */ + disable_irq(channel->dma_irq); + + /* Turn on polling */ + __napi_schedule(&channel->napi); + } + + return IRQ_HANDLED; +} + static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) { struct xgbe_channel *channel = container_of(timer, @@ -357,18 +390,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) tx_timer); struct xgbe_ring *ring = channel->tx_ring; struct xgbe_prv_data *pdata = channel->pdata; + struct napi_struct *napi; unsigned long flags; DBGPR("-->xgbe_tx_timer\n"); + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + spin_lock_irqsave(&ring->lock, flags); - if (napi_schedule_prep(&pdata->napi)) { + if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ - xgbe_disable_rx_tx_ints(pdata); + if (pdata->per_channel_irq) + disable_irq(channel->dma_irq); + else + xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule(&pdata->napi); + __napi_schedule(napi); } channel->tx_timer_active = 0; @@ -504,18 +543,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) { - if (add) - netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll, - NAPI_POLL_WEIGHT); - napi_enable(&pdata->napi); + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) + netif_napi_add(pdata->netdev, &channel->napi, + xgbe_one_poll, NAPI_POLL_WEIGHT); + + napi_enable(&channel->napi); + } + } else { + if (add) + netif_napi_add(pdata->netdev, &pdata->napi, + xgbe_all_poll, NAPI_POLL_WEIGHT); + + napi_enable(&pdata->napi); + } } static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) { - napi_disable(&pdata->napi); + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->napi); - if (del) - netif_napi_del(&pdata->napi); + if (del) + netif_napi_del(&channel->napi); + } + } else { + napi_disable(&pdata->napi); + + if (del) + netif_napi_del(&pdata->napi); + } } void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) @@ -828,7 +895,9 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) { + struct xgbe_channel *channel; struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int i; DBGPR("-->xgbe_restart_dev\n"); @@ -837,7 +906,12 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) return; xgbe_stop(pdata); - synchronize_irq(pdata->irq_number); + synchronize_irq(pdata->dev_irq); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + synchronize_irq(channel->dma_irq); + } xgbe_free_tx_data(pdata); xgbe_free_rx_data(pdata); @@ -1165,6 +1239,9 @@ static int xgbe_open(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel = NULL; + char dma_irq_name[IFNAMSIZ + 32]; + unsigned int i = 0; int ret; DBGPR("-->xgbe_open\n"); @@ -1208,14 +1285,32 @@ static int xgbe_open(struct net_device *netdev) INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); /* Request interrupts */ - ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, netdev->name, pdata); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", - pdata->irq_number); + pdata->dev_irq); goto err_rings; } - pdata->irq_number = netdev->irq; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(dma_irq_name, sizeof(dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xgbe_dma_isr, 0, dma_irq_name, + channel); + if (ret) { + netdev_alert(netdev, + 
"error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + } ret = xgbe_start(pdata); if (ret) @@ -1228,8 +1323,14 @@ static int xgbe_open(struct net_device *netdev) err_start: hw_if->exit(pdata); - devm_free_irq(pdata->dev, pdata->irq_number, pdata); - pdata->irq_number = 0; +err_irq: + if (pdata->per_channel_irq) { + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); err_rings: desc_if->free_ring_resources(pdata); @@ -1254,6 +1355,8 @@ static int xgbe_close(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel; + unsigned int i; DBGPR("-->xgbe_close\n"); @@ -1269,10 +1372,12 @@ static int xgbe_close(struct net_device *netdev) /* Free the channel and ring structures */ xgbe_free_channels(pdata); - /* Release the interrupt */ - if (pdata->irq_number != 0) { - devm_free_irq(pdata->dev, pdata->irq_number, pdata); - pdata->irq_number = 0; + /* Release the interrupts */ + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, channel); } /* Disable the clocks */ @@ -1505,14 +1610,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, static void xgbe_poll_controller(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_channel *channel; + unsigned int i; DBGPR("-->xgbe_poll_controller\n"); - disable_irq(pdata->irq_number); - - xgbe_isr(pdata->irq_number, pdata); - - enable_irq(pdata->irq_number); + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + xgbe_dma_isr(channel->dma_irq, channel); + } else { + disable_irq(pdata->dev_irq); + xgbe_isr(pdata->dev_irq, pdata); + enable_irq(pdata->dev_irq); + } DBGPR("<--xgbe_poll_controller\n"); } @@ -1704,6 +1815,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; struct net_device *netdev = pdata->netdev; + struct napi_struct *napi; struct sk_buff *skb; struct skb_shared_hwtstamps *hwtstamps; unsigned int incomplete, error, context_next, context; @@ -1717,6 +1829,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!ring) return 0; + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); packet = &ring->packet_data; while (packet_count < budget) { @@ -1849,10 +1963,10 @@ read_again: skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); - skb_mark_napi_id(skb, &pdata->napi); + skb_mark_napi_id(skb, napi); netdev->last_rx = jiffies; - napi_gro_receive(&pdata->napi, skb); + napi_gro_receive(napi, skb); next_packet: packet_count++; @@ -1874,7 +1988,35 @@ next_packet: return packet_count; } -static int xgbe_poll(struct napi_struct *napi, int budget) +static int xgbe_one_poll(struct napi_struct *napi, int budget) +{ + struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, + napi); + int processed = 0; + + DBGPR("-->xgbe_one_poll: budget=%d\n", budget); + + /* Cleanup Tx ring first */ + xgbe_tx_poll(channel); + + /* Process Rx ring next */ + processed = xgbe_rx_poll(channel, budget); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete(napi); + + /* Enable Tx and Rx interrupts */ + enable_irq(channel->dma_irq); + } + + DBGPR("<--xgbe_one_poll: received = %d\n", processed); + + return processed; +} + +static int xgbe_all_poll(struct napi_struct *napi, int budget) { struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, napi); @@ -1883,7 +2025,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget) int processed, last_processed; unsigned int i; - DBGPR("-->xgbe_poll: budget=%d\n", budget); + DBGPR("-->xgbe_all_poll: budget=%d\n", budget); processed = 0; ring_budget = budget / pdata->rx_ring_count; @@ -1911,7 +2053,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget) xgbe_enable_rx_tx_ints(pdata); } - DBGPR("<--xgbe_poll: received = %d\n", processed); + DBGPR("<--xgbe_all_poll: received = %d\n", processed); return processed; } -- cgit From 5b9dfe299e55604af47cdca9d03d2e9d4fe2ad53 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 4 Nov 2014 16:07:02 -0600 Subject: amd-xgbe: Provide support for receive side scaling This patch provides support for receive side scaling (RSS). RSS allows for spreading incoming network packets across the Rx queues. When used in conjunction with the per DMA channel interrupt support, this allows the receive processing to be spread across multiple processors. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
From 5b9dfe299e55604af47cdca9d03d2e9d4fe2ad53 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 4 Nov 2014 16:07:02 -0600
Subject: amd-xgbe: Provide support for receive side scaling

This patch provides support for receive side scaling (RSS). RSS allows
for spreading incoming network packets across the Rx queues. When used
in conjunction with the per DMA channel interrupt support, this allows
the receive processing to be spread across multiple processors.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c3533e104c61..6c5a7079697c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1661,12 +1661,21 @@ static int xgbe_set_features(struct net_device *netdev,
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	netdev_features_t rxcsum, rxvlan, rxvlan_filter;
+	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+	int ret = 0;
 
+	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
+	if ((features & NETIF_F_RXHASH) && !rxhash)
+		ret = hw_if->enable_rss(pdata);
+	else if (!(features & NETIF_F_RXHASH) && rxhash)
+		ret = hw_if->disable_rss(pdata);
+	if (ret)
+		return ret;
+
 	if ((features & NETIF_F_RXCSUM) && !rxcsum)
 		hw_if->enable_rx_csum(pdata);
 	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
@@ -1960,6 +1969,11 @@ read_again:
 			hwtstamps->hwtstamp = ns_to_ktime(nsec);
 		}
 
+		if (XGMAC_GET_BITS(packet->attributes,
+				   RX_PACKET_ATTRIBUTES, RSS_HASH))
+			skb_set_hash(skb, packet->rss_hash,
+				     packet->rss_hash_type);
+
 		skb->dev = netdev;
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, channel->queue_index);
--
cgit

From a7beaf23007ac701ebffee1cb48d5d1aea5e7803 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 4 Nov 2014 16:07:29 -0600
Subject: amd-xgbe: Fix a spelling error

This patch fixes the spelling of the word "descriptor" in a couple
of locations.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 6c5a7079697c..ced9f52eb45b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1185,13 +1185,13 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 	packet->rdesc_count = 0;
 
 	if (xgbe_is_tso(skb)) {
-		/* TSO requires an extra desriptor if mss is different */
+		/* TSO requires an extra descriptor if mss is different */
 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
 			context_desc = 1;
 			packet->rdesc_count++;
 		}
 
-		/* TSO requires an extra desriptor for TSO header */
+		/* TSO requires an extra descriptor for TSO header */
 		packet->rdesc_count++;
 
 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
--
cgit

From e98c72c94205c59745f9ed0ac2837d5b83084a46 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 6 Nov 2014 17:02:13 -0600
Subject: amd-xgbe: Free channel/ring structures later

The channel structure is freed before freeing the per channel
interrupts, resulting in a kernel oops. Move the call to free the
channel structure to after the freeing of the per channel interrupts.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ced9f52eb45b..ec5fff38108d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1369,9 +1369,6 @@ static int xgbe_close(struct net_device *netdev)
 	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Free the channel and ring structures */
-	xgbe_free_channels(pdata);
-
 	/* Release the interrupts */
 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
 	if (pdata->per_channel_irq) {
@@ -1380,6 +1377,9 @@ static int xgbe_close(struct net_device *netdev)
 			devm_free_irq(pdata->dev, channel->dma_irq, channel);
 	}
 
+	/* Free the channel and ring structures */
+	xgbe_free_channels(pdata);
+
 	/* Disable the clocks */
 	clk_disable_unprepare(pdata->ptpclk);
 	clk_disable_unprepare(pdata->sysclk);
--
cgit

From f5eecbbef0b7d28b171b3b873e1e4a69aabd7b58 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 6 Nov 2014 17:02:19 -0600
Subject: amd-xgbe: Check for complete packet on skb allocation error

If the skb allocation fails during receive processing, the driver
would continue reading descriptors without first determining if there
were any more descriptors for the current packet. Update the code to
check whether more descriptors are associated with the current packet
or whether to move on to the next descriptor as a new packet.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ec5fff38108d..0544931329d1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1908,7 +1908,7 @@ read_again:
 				skb = xgbe_create_skb(pdata, rdata, &put_len);
 				if (!skb) {
 					error = 1;
-					goto read_again;
+					goto skip_data;
 				}
 			}
 
@@ -1926,10 +1926,10 @@ read_again:
 			}
 		}
 
+skip_data:
 		if (incomplete || context_next)
 			goto read_again;
 
-		/* Stray Context Descriptor? */
 		if (!skb)
 			goto next_packet;
--
cgit
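The set_features handler above only touches the hardware when a feature
bit actually changes state, comparing the requested bits against the
cached ones. That edge-detect logic reduces to the following (runnable
plain C with a generic bit name, not the kernel's netdev feature API):

	#include <stdio.h>

	#define F_RXHASH (1u << 0)	/* stand-in for NETIF_F_RXHASH */

	static unsigned int current_features;

	/* Touch "hardware" RSS state only on a 0->1 or 1->0 transition */
	static void set_features(unsigned int requested)
	{
		unsigned int rxhash = current_features & F_RXHASH;

		if ((requested & F_RXHASH) && !rxhash)
			printf("enable RSS in hardware\n");
		else if (!(requested & F_RXHASH) && rxhash)
			printf("disable RSS in hardware\n");
		else
			printf("no change\n");

		current_features = requested;
	}

	int main(void)
	{
		set_features(F_RXHASH);	/* enable RSS in hardware */
		set_features(F_RXHASH);	/* no change */
		set_features(0);	/* disable RSS in hardware */
		return 0;
	}

Caching the old bits before acting also lets the driver bail out early
(returning the enable/disable error) without leaving its cached feature
state out of sync with the hardware.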
From 5449e27167c9945d24108690aff86943d79774a7 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 20 Nov 2014 11:03:26 -0600
Subject: amd-xgbe: Add a read memory barrier to Tx/Rx path

Add a read memory barrier to the Tx and Rx paths where the ownership
bit is checked to be sure that all descriptor fields are read after
having read the ownership bit for the descriptor. This has not been
an issue to date, but it's a good safe-guard to have.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 0544931329d1..eebb787251c4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1791,6 +1791,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 		if (!hw_if->tx_complete(rdesc))
 			break;
 
+		/* Make sure descriptor fields are read after reading the OWN
+		 * bit */
+		rmb();
+
 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
 		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
 #endif
--
cgit

From c9f140ebb00891c5bfd6b5cdd0552493bcbeac20 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 20 Nov 2014 11:03:44 -0600
Subject: amd-xgbe: Separate Tx/Rx ring data fields into new structs

Move the Tx and Rx related fields within the xgbe_ring_data struct
into their own structs in order to more easily see what fields are
used for each operation.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index eebb787251c4..46ea423f9a08 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1747,14 +1747,14 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 	u8 *packet;
 	unsigned int copy_len;
 
-	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
+	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	packet = page_address(rdata->rx_hdr.pa.pages) +
-		 rdata->rx_hdr.pa.pages_offset;
-	copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
-	copy_len = min(rdata->rx_hdr.dma_len, copy_len);
+	packet = page_address(rdata->rx.hdr.pa.pages) +
+		 rdata->rx.hdr.pa.pages_offset;
+	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
@@ -1900,13 +1900,13 @@ read_again:
 		}
 
 		if (!context) {
-			put_len = rdata->len - len;
+			put_len = rdata->rx.len - len;
 			len += put_len;
 
 			if (!skb) {
 				dma_sync_single_for_cpu(pdata->dev,
-							rdata->rx_hdr.dma,
-							rdata->rx_hdr.dma_len,
+							rdata->rx.hdr.dma,
+							rdata->rx.hdr.dma_len,
 							DMA_FROM_DEVICE);
 
 				skb = xgbe_create_skb(pdata, rdata, &put_len);
@@ -1918,15 +1918,15 @@ read_again:
 
 			if (put_len) {
 				dma_sync_single_for_cpu(pdata->dev,
-							rdata->rx_buf.dma,
-							rdata->rx_buf.dma_len,
+							rdata->rx.buf.dma,
+							rdata->rx.buf.dma_len,
 							DMA_FROM_DEVICE);
 
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-						rdata->rx_buf.pa.pages,
-						rdata->rx_buf.pa.pages_offset,
-						put_len, rdata->rx_buf.dma_len);
-				rdata->rx_buf.pa.pages = NULL;
+						rdata->rx.buf.pa.pages,
+						rdata->rx.buf.pa.pages_offset,
+						put_len, rdata->rx.buf.dma_len);
+				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
--
cgit

From 5fb4b86a66363e275add87b441bf80b24144a0c9 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 20 Nov 2014 11:03:50 -0600
Subject: amd-xgbe: Add BQL support

Call the appropriate BQL functions to track the number of bytes queued
during Tx processing and to track the number of packets and bytes that
have been transmitted during Tx complete processing.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 43 +++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 46ea423f9a08..f9635281c7df 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -876,7 +876,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	struct xgbe_channel *channel;
 	struct net_device *netdev = pdata->netdev;
+	struct netdev_queue *txq;
+	unsigned int i;
 
 	DBGPR("-->xgbe_stop\n");
 
@@ -890,6 +893,15 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->tx_ring)
+			continue;
+
+		txq = netdev_get_tx_queue(netdev, channel->queue_index);
+		netdev_tx_reset_queue(txq);
+	}
+
 	DBGPR("<--xgbe_stop\n");
 }
 
@@ -1156,6 +1168,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
 	      packet->tcp_header_len, packet->tcp_payload_len);
 	DBGPR("  packet->mss=%u\n", packet->mss);
 
+	/* Update the number of packets that will ultimately be transmitted
+	 * along with the extra bytes for each extra packet
+	 */
+	packet->tx_packets = skb_shinfo(skb)->gso_segs;
+	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
 	return 0;
 }
 
@@ -1184,6 +1202,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 	context_desc = 0;
 	packet->rdesc_count = 0;
 
+	packet->tx_packets = 1;
+	packet->tx_bytes = skb->len;
+
 	if (xgbe_is_tso(skb)) {
 		/* TSO requires an extra descriptor if mss is different */
 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
@@ -1400,12 +1421,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct xgbe_channel *channel;
 	struct xgbe_ring *ring;
 	struct xgbe_packet_data *packet;
+	struct netdev_queue *txq;
 	unsigned long flags;
 	int ret;
 
 	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
 
 	channel = pdata->channel + skb->queue_mapping;
+	txq = netdev_get_tx_queue(netdev, channel->queue_index);
 	ring = channel->tx_ring;
 	packet = &ring->packet_data;
 
@@ -1447,6 +1470,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	xgbe_prep_tx_tstamp(pdata, skb, packet);
 
+	/* Report on the actual number of bytes (to be) sent */
+	netdev_tx_sent_queue(txq, packet->tx_bytes);
+
 	/* Configure required descriptor fields for transmission */
 	hw_if->dev_xmit(channel);
 
@@ -1772,8 +1798,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct net_device *netdev = pdata->netdev;
+	struct netdev_queue *txq;
 	unsigned long flags;
 	int processed = 0;
+	unsigned int tx_packets = 0, tx_bytes = 0;
 
 	DBGPR("-->xgbe_tx_poll\n");
 
@@ -1781,6 +1809,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 	if (!ring)
 		return 0;
 
+	txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
 	spin_lock_irqsave(&ring->lock, flags);
 
 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
@@ -1799,6 +1829,11 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
 #endif
 
+		if (hw_if->is_last_desc(rdesc)) {
+			tx_packets += rdata->tx.packets;
+			tx_bytes += rdata->tx.bytes;
+		}
+
 		/* Free the SKB and reset the descriptor for re-use */
 		desc_if->unmap_rdata(pdata, rdata);
 		hw_if->tx_desc_reset(rdata);
@@ -1807,14 +1842,20 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 		ring->dirty++;
 	}
 
+	if (!processed)
+		goto unlock;
+
+	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
 	if ((ring->tx.queue_stopped == 1) &&
 	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
 		ring->tx.queue_stopped = 0;
-		netif_wake_subqueue(netdev, channel->queue_index);
+		netif_tx_wake_queue(txq);
 	}
 
 	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
 
+unlock:
 	spin_unlock_irqrestore(&ring->lock, flags);
 
 	return processed;
--
cgit

From 16958a2b05def4ed214ae681b7ee4ce8537b00fb Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 20 Nov 2014 11:04:08 -0600
Subject: amd-xgbe: Add support for the skb->xmit_more flag

Add support to delay telling the hardware about data that is ready to
be transmitted if the skb->xmit_more flag is set.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 37 ++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f9635281c7df..02c104dc2aa4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -225,6 +225,28 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
 	return (ring->rdesc_count - (ring->cur - ring->dirty));
 }
 
+static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+				    struct xgbe_ring *ring, unsigned int count)
+{
+	struct xgbe_prv_data *pdata = channel->pdata;
+
+	if (count > xgbe_tx_avail_desc(ring)) {
+		DBGPR("  Tx queue stopped, not enough descriptors available\n");
+		netif_stop_subqueue(pdata->netdev, channel->queue_index);
+		ring->tx.queue_stopped = 1;
+
+		/* If we haven't notified the hardware because of xmit_more
+		 * support, tell it now
+		 */
+		if (ring->tx.xmit_more)
+			pdata->hw_if.tx_start_xmit(channel, ring);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return 0;
+}
+
 static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 {
 	unsigned int rx_buf_size;
@@ -1199,6 +1221,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 	unsigned int len;
 	unsigned int i;
 
+	packet->skb = skb;
+
 	context_desc = 0;
 	packet->rdesc_count = 0;
 
@@ -1447,13 +1471,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 	xgbe_packet_info(pdata, ring, skb, packet);
 
 	/* Check that there are enough descriptors available */
-	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
-		DBGPR("  Tx queue stopped, not enough descriptors available\n");
-		netif_stop_subqueue(netdev, channel->queue_index);
-		ring->tx.queue_stopped = 1;
-		ret = NETDEV_TX_BUSY;
+	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
+	if (ret)
 		goto tx_netdev_return;
-	}
 
 	ret = xgbe_prep_tso(skb, packet);
 	if (ret) {
@@ -1480,6 +1500,11 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 	xgbe_print_pkt(netdev, skb, true);
 #endif
 
+	/* Stop the queue in advance if there may not be enough descriptors */
+	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
+
+	ret = NETDEV_TX_OK;
+
 tx_netdev_return:
 	spin_unlock_irqrestore(&ring->lock, flags);
--
cgit
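For TSO, the BQL patch above reports the bytes that will actually hit
the wire, not just skb->len: every segment after the first repeats the
protocol headers. The arithmetic, runnable in plain C (the numbers are
toy values, not taken from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int skb_len = 64 * 1024;  /* one large TSO skb */
		unsigned int header_len = 54;      /* Ethernet+IP+TCP headers */
		unsigned int gso_segs = 45;        /* segments hw will emit */
		unsigned int tx_bytes = skb_len;

		/* Each extra segment replicates the headers once */
		tx_bytes += (gso_segs - 1) * header_len;

		printf("queued %u bytes for %u packets\n", tx_bytes, gso_segs);
		return 0;
	}

Reporting the inflated count to netdev_tx_sent_queue(), and the matching
totals to netdev_tx_completed_queue() at cleanup, is what keeps BQL's
in-flight byte estimate honest for offloaded sends.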
From 54ceb9ec6fe8e64067b164e6b161dd63905c059f Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 2 Dec 2014 18:07:18 -0600
Subject: amd-xgbe: IRQ names require allocated memory

When requesting an irq, the name passed in must be (part of) allocated
memory. The irq name was a local variable and resulted in random
characters when listing /proc/interrupts. Add a character field to the
xgbe_channel structure to hold the irq name and use that.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 02c104dc2aa4..bedfdb1c430d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1285,7 +1285,6 @@ static int xgbe_open(struct net_device *netdev)
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel = NULL;
-	char dma_irq_name[IFNAMSIZ + 32];
 	unsigned int i = 0;
 	int ret;
 
@@ -1341,13 +1340,14 @@ static int xgbe_open(struct net_device *netdev)
 	if (pdata->per_channel_irq) {
 		channel = pdata->channel;
 		for (i = 0; i < pdata->channel_count; i++, channel++) {
-			snprintf(dma_irq_name, sizeof(dma_irq_name) - 1,
+			snprintf(channel->dma_irq_name,
+				 sizeof(channel->dma_irq_name) - 1,
 				 "%s-TxRx-%u", netdev_name(netdev),
 				 channel->queue_index);
 
 			ret = devm_request_irq(pdata->dev, channel->dma_irq,
-					       xgbe_dma_isr, 0, dma_irq_name,
-					       channel);
+					       xgbe_dma_isr, 0,
+					       channel->dma_irq_name, channel);
 			if (ret) {
 				netdev_alert(netdev,
 					     "error requesting irq %d\n",
--
cgit

From 244d62be91ddcea55ec6d456dbb7f71d411d21f0 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Thu, 4 Dec 2014 11:52:35 -0600
Subject: amd-xgbe: Prevent Tx cleanup stall

When performing Tx cleanup, the dirty index counter is compared to the
current index counter as one of the tests used to determine when to
stop cleanup. The "less than" test will fail when the current index
counter rolls over to zero, causing cleanup to never occur again.
Update the test to a "not equal" comparison to avoid this situation.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2349ea970255..d0e35302410f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1554,7 +1554,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 	spin_lock_irqsave(&ring->lock, flags);
 
 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
-	       (ring->dirty < ring->cur)) {
+	       (ring->dirty != ring->cur)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
 		rdesc = rdata->rdesc;
--
cgit

From f9c5c62db1c9bc20f396c3527b5074c4e7f1c275 Mon Sep 17 00:00:00 2001
From: "Lendacky, Thomas"
Date: Tue, 9 Dec 2014 14:54:08 -0600
Subject: amd-xgbe: Use disable_irq_nosync when in IRQ context

The disable_irq_nosync function, not the disable_irq function, must be
used to disable the DMA channel interrupt from within the interrupt
service routine. Change the disable_irq call to disable_irq_nosync.

Signed-off-by: Tom Lendacky
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index bedfdb1c430d..bf6bf1118b0f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -396,7 +396,7 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
 	 */
 	if (napi_schedule_prep(&channel->napi)) {
 		/* Disable Tx and Rx interrupts */
-		disable_irq(channel->dma_irq);
+		disable_irq_nosync(channel->dma_irq);
 
 		/* Turn on polling */
 		__napi_schedule(&channel->napi);
 	}
--
cgit
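The Tx-cleanup fix above matters because the ring indices are
free-running unsigned counters that are reduced to a ring slot only when
a descriptor is looked up. Once cur wraps past zero, "dirty < cur" goes
false even though work remains, while "dirty != cur" keeps working; the
free-descriptor arithmetic in xgbe_tx_avail_desc() survives the wrap for
the same reason. A runnable demonstration (plain C, toy ring size):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t rdesc_count = 512;		/* ring size */
		uint32_t dirty = UINT32_MAX - 1;	/* cleanup index */
		uint32_t cur = dirty + 3;		/* wrapped to 1 */

		/* Three descriptors are outstanding, but "<" misses them */
		printf("dirty <  cur : %s\n",
		       dirty < cur ? "true" : "false (stall!)");
		printf("dirty != cur : %s\n", dirty != cur ? "true" : "false");

		/* Unsigned subtraction stays correct across the wrap */
		printf("avail = %u\n", rdesc_count - (cur - dirty)); /* 509 */
		return 0;
	}

Because unsigned overflow is well defined in C, cur - dirty is 3 even
after the wrap, so only the ordering comparison needed fixing.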