From e612c4322c625c5eb93c8a4f2b72860e7c0c12c1 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 11 Mar 2014 18:06:47 +0000
Subject: net: fec: better indexing for transmit descriptor ring

Maintaining the transmit ring position via pointers is inefficient,
especially as it involves complex pointer manipulation and overlaps
with the receive descriptor logic.  Re-implement this using ring
indexes and a single function which returns the descriptor for an
index, stepping by whichever descriptor type (normal or extended) is
in use.  As an additional benefit, using a union allows cleaner
access to the descriptor fields.

Signed-off-by: Russell King
---
 drivers/net/ethernet/freescale/fec.h      |   6 +-
 drivers/net/ethernet/freescale/fec_main.c | 188 +++++++++++++++---------------
 2 files changed, 98 insertions(+), 96 deletions(-)
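[Reviewer note, not part of the patch: the sketch below is a minimal,
self-contained model of the index-based bookkeeping this change adopts.
The demo_* types and names are illustrative stand-ins rather than the
driver's own bufdesc/bufdesc_ex definitions; it only shows how a single
accessor maps a ring index onto either descriptor layout, and how the
number of free transmit descriptors falls out of the tx_dirty/tx_next
indexes with a wrap correction.]

/*
 * Reviewer's sketch only -- not part of the patch.  The demo_* names are
 * simplified stand-ins for the driver's bufdesc/bufdesc_ex types.
 */
#include <stdio.h>

struct demo_bd {                 /* legacy buffer descriptor */
	unsigned short sc;
	unsigned short len;
};

struct demo_bd_ex {              /* extended buffer descriptor */
	struct demo_bd bd;
	unsigned int esc;
};

union demo_bd_u {
	struct demo_bd bd;
	struct demo_bd_ex ebd;
};

struct demo_ring {
	union demo_bd_u *base;   /* start of the descriptor ring */
	unsigned short tx_next;  /* next entry to be filled */
	unsigned short tx_dirty; /* last entry reclaimed */
	unsigned short ring_size;
	int extended;            /* non-zero when extended descriptors are used */
};

/* Map a ring index to a descriptor, stepping by whichever layout is active. */
static union demo_bd_u *demo_tx_get(struct demo_ring *r, unsigned int index)
{
	if (r->extended)
		return (union demo_bd_u *)(&r->base->ebd + index);
	return (union demo_bd_u *)(&r->base->bd + index);
}

/* Free descriptors: dirty chases next around the ring, so correct for wrap. */
static unsigned int demo_free_txdesc(const struct demo_ring *r)
{
	int num = r->tx_dirty - r->tx_next;

	return num < 0 ? num + r->ring_size : num;
}

int main(void)
{
	union demo_bd_u ring[8] = { 0 };
	struct demo_ring r = {
		.base = ring, .tx_next = 6, .tx_dirty = 2,
		.ring_size = 8, .extended = 0,
	};

	printf("free tx descriptors: %u\n", demo_free_txdesc(&r)); /* prints 4 */
	printf("descriptor 3 lives at %p\n", (void *)demo_tx_get(&r, 3));
	return 0;
}

[End of reviewer note; the patch itself follows.]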
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index e850e7e5118f..76fe5a9bb85e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,9 +296,9 @@ struct fec_enet_private {
 	union bufdesc_u *rx_bd_base;
 	union bufdesc_u *tx_bd_base;
 	/* The next free ring entry */
-	union bufdesc_u *cur_rx, *cur_tx;
-	/* The ring entries to be free()ed */
-	union bufdesc_u *dirty_tx;
+	unsigned short tx_next;
+	unsigned short tx_dirty;
+	union bufdesc_u *cur_rx;
 
 	unsigned short tx_ring_size;
 	unsigned short rx_ring_size;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c49f2e22e6ee..fdd190f4bf0e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -264,19 +264,28 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
+static union bufdesc_u *
+fec_enet_tx_get(unsigned int index, struct fec_enet_private *fep)
+{
+	union bufdesc_u *base = fep->tx_bd_base;
+	union bufdesc_u *bdp;
+
+	if (fep->bufdesc_ex)
+		bdp = (union bufdesc_u *)(&base->ebd + index);
+	else
+		bdp = (union bufdesc_u *)(&base->bd + index);
+
+	return bdp;
+}
+
 static inline
 union bufdesc_u *fec_enet_get_nextdesc(union bufdesc_u *bdp, struct fec_enet_private *fep)
 {
 	union bufdesc_u *base;
 	int ring_size;
 
-	if (bdp >= fep->tx_bd_base) {
-		base = fep->tx_bd_base;
-		ring_size = fep->tx_ring_size;
-	} else {
-		base = fep->rx_bd_base;
-		ring_size = fep->rx_ring_size;
-	}
+	base = fep->rx_bd_base;
+	ring_size = fep->rx_ring_size;
 
 	if (fep->bufdesc_ex) {
 		struct bufdesc_ex *ebd = &bdp->ebd + 1;
@@ -295,13 +304,8 @@ union bufdesc_u *fec_enet_get_prevdesc(union bufdesc_u *bdp, struct fec_enet_pri
 	union bufdesc_u *base;
 	int ring_size;
 
-	if (bdp >= fep->tx_bd_base) {
-		base = fep->tx_bd_base;
-		ring_size = fep->tx_ring_size;
-	} else {
-		base = fep->rx_bd_base;
-		ring_size = fep->rx_ring_size;
-	}
+	base = fep->rx_bd_base;
+	ring_size = fep->rx_ring_size;
 
 	if (fep->bufdesc_ex) {
 		struct bufdesc_ex *ebd = &bdp->ebd - 1;
@@ -325,13 +329,7 @@ static int fec_enet_get_bd_index(union bufdesc_u *base, union bufdesc_u *bdp,
 
 static unsigned int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
 {
-	int num;
-
-	if (fep->bufdesc_ex)
-		num = &fep->dirty_tx->ebd - &fep->cur_tx->ebd;
-	else
-		num = &fep->dirty_tx->bd - &fep->cur_tx->bd;
-
+	int num = fep->tx_dirty - fep->tx_next;
 	return num < 0 ? num + fep->tx_ring_size : num;
 }
 
@@ -349,23 +347,23 @@ static void *swap_buffer(void *bufaddr, int len)
 static void fec_dump(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	union bufdesc_u *bdp = fep->tx_bd_base;
+	union bufdesc_u *bdp;
 	unsigned int index = 0;
 
 	netdev_info(ndev, "TX ring dump\n");
 	pr_info("Nr     SC     addr       len  SKB\n");
 
-	do {
+	for (index = 0; index < fep->tx_ring_size; index++) {
+		bdp = fec_enet_tx_get(index, fep);
+
 		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
 			index,
-			bdp == fep->cur_tx ? 'S' : ' ',
-			bdp == fep->dirty_tx ? 'H' : ' ',
+			index == fep->tx_next ? 'S' : ' ',
+			index == fep->tx_dirty ? 'H' : ' ',
 			bdp->bd.cbd_sc, bdp->bd.cbd_bufaddr,
 			bdp->bd.cbd_datlen,
 			fep->tx_skbuff[index]);
-		bdp = fec_enet_get_nextdesc(bdp, fep);
-		index++;
-	} while (bdp != fep->tx_bd_base);
+	}
 }
 
 static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -414,21 +412,24 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	union bufdesc_u *bdp = fep->cur_tx;
+	union bufdesc_u *bdp;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int frag, frag_len;
 	unsigned short status;
 	unsigned int estatus = 0;
 	skb_frag_t *this_frag;
-	unsigned int index;
+	unsigned int index = fep->tx_next;
 	void *bufaddr;
 	dma_addr_t addr;
 	int i;
 
 	for (frag = 0; frag < nr_frags; frag++) {
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		if (++index >= fep->tx_ring_size)
+			index = 0;
+
+		bdp = fec_enet_tx_get(index, fep);
 		status = bdp->bd.cbd_sc;
 		status &= ~BD_ENET_TX_STATS;
 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -454,7 +455,6 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 
 		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
 
-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
 			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
 			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
@@ -478,14 +478,16 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 		bdp->bd.cbd_sc = status;
 	}
 
-	fep->cur_tx = bdp;
+	fep->tx_next = index;
 
 	return 0;
 
 dma_mapping_error:
-	bdp = fep->cur_tx;
+	index = fep->tx_next;
 	for (i = 0; i < frag; i++) {
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		if (++index >= fep->tx_ring_size)
+			index = 0;
+		bdp = fec_enet_tx_get(index, fep);
 		dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
 				bdp->bd.cbd_datlen, DMA_TO_DEVICE);
 	}
@@ -498,7 +500,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
-	union bufdesc_u *bdp, *last_bdp;
+	union bufdesc_u *bdp;
 	void *bufaddr;
 	dma_addr_t addr;
 	unsigned short status;
@@ -521,15 +523,17 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* Fill in a Tx ring entry */
-	bdp = fep->cur_tx;
-	status = bdp->bd.cbd_sc;
-	status &= ~BD_ENET_TX_STATS;
+	index = fep->tx_next;
 
 	/* Set buffer length and buffer pointer */
 	bufaddr = skb->data;
 	buflen = skb_headlen(skb);
 
-	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+	/*
+	 * On some FEC implementations data must be aligned on
+	 * 4-byte boundaries. Use bounce buffers to copy data
+	 * and get it aligned. Ugh.
+	 */
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
 		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
 		memcpy(fep->tx_bounce[index], skb->data, buflen);
@@ -548,6 +552,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 
+	bdp = fec_enet_tx_get(index, fep);
+	status = bdp->bd.cbd_sc & ~BD_ENET_TX_STATS;
+
 	if (nr_frags) {
 		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
 		if (ret)
@@ -574,8 +581,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 		bdp->ebd.cbd_esc = estatus;
 	}
 
-	last_bdp = fep->cur_tx;
-	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+	index = fep->tx_next;
 
 	/* Save skb pointer */
 	fep->tx_skbuff[index] = skb;
@@ -592,16 +598,15 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	/* Send it on its way. Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
-	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
-	bdp->bd.cbd_sc = status;
-
-	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(last_bdp, fep);
+	bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_TC;
 
 	skb_tx_timestamp(skb);
 	netdev_sent_queue(ndev, skb->len);
 
-	fep->cur_tx = bdp;
+	if (++index >= fep->tx_ring_size)
+		index = 0;
+
+	fep->tx_next = index;
 
 	/* Trigger transmission start */
 	if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
@@ -725,9 +730,9 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int total_len, data_left;
-	union bufdesc_u *bdp = fep->cur_tx;
 	struct tso_t tso;
-	unsigned int index = 0;
+	unsigned int index = fep->tx_next;
+	unsigned int last_index = index;
 	int ret;
 
 	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
@@ -748,9 +753,10 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 	total_len = skb->len - hdr_len;
 	while (total_len > 0) {
+		union bufdesc_u *bdp;
 		char *hdr;
 
-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		bdp = fec_enet_tx_get(index, fep);
 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
 		total_len -= data_left;
@@ -764,9 +770,11 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 		while (data_left > 0) {
 			int size;
 
+			if (++index >= fep->tx_ring_size)
+				index = 0;
+
 			size = min_t(int, tso.size, data_left);
-			bdp = fec_enet_get_nextdesc(bdp, fep);
-			index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+			bdp = fec_enet_tx_get(index, fep);
 			ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index,
 							tso.data, size,
 							size == data_left, total_len == 0);
@@ -777,15 +785,17 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 			tso_build_data(skb, &tso, size);
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep);
+		last_index = index;
+		if (++index >= fep->tx_ring_size)
+			index = 0;
 	}
 
 	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+	fep->tx_skbuff[last_index] = skb;
 
 	skb_tx_timestamp(skb);
 	netdev_sent_queue(ndev, skb->len);
-	fep->cur_tx = bdp;
+	fep->tx_next = index;
 
 	/* Trigger transmission start */
 	if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
@@ -844,12 +854,14 @@ static void fec_enet_bd_init(struct net_device *dev)
 	fep->cur_rx = fep->rx_bd_base;
 
 	/* ...and the same for transmit */
-	bdp = fep->tx_bd_base;
-	fep->cur_tx = bdp;
 	for (i = 0; i < fep->tx_ring_size; i++) {
+		bdp = fec_enet_tx_get(i, fep);
 		/* Initialize the BD for every fragment in the page. */
-		bdp->bd.cbd_sc = 0;
+		if (i == fep->tx_ring_size - 1)
+			bdp->bd.cbd_sc = BD_SC_WRAP;
+		else
+			bdp->bd.cbd_sc = 0;
 		if (bdp->bd.cbd_bufaddr)
 			fec_enet_tx_unmap(bdp, fep);
 		bdp->bd.cbd_bufaddr = 0;
@@ -857,13 +869,10 @@ static void fec_enet_bd_init(struct net_device *dev)
 			dev_kfree_skb_any(fep->tx_skbuff[i]);
 			fep->tx_skbuff[i] = NULL;
 		}
-		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
-	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
-	bdp->bd.cbd_sc |= BD_SC_WRAP;
-	fep->dirty_tx = bdp;
+	fep->tx_next = 0;
+	fep->tx_dirty = fep->tx_ring_size - 1;
 }
 
 /*
@@ -1126,37 +1135,35 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 static void
 fec_enet_tx(struct net_device *ndev)
 {
-	struct fec_enet_private *fep;
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	union bufdesc_u *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
-	int index = 0;
+	unsigned int index = fep->tx_dirty;
 	unsigned int pkts_compl, bytes_compl;
 
-	fep = netdev_priv(ndev);
-	bdp = fep->dirty_tx;
-
-	/* get next bdp of dirty_tx */
-	bdp = fec_enet_get_nextdesc(bdp, fep);
-
 	pkts_compl = bytes_compl = 0;
-	while (((status = bdp->bd.cbd_sc) & BD_ENET_TX_READY) == 0) {
+	do {
+		if (++index >= fep->tx_ring_size)
+			index = 0;
 
 		/* current queue is empty */
-		if (bdp == fep->cur_tx)
+		if (index == fep->tx_next)
 			break;
 
-		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		bdp = fec_enet_tx_get(index, fep);
+
+		status = bdp->bd.cbd_sc;
+		if (status & BD_ENET_TX_READY)
+			break;
 
 		skb = fep->tx_skbuff[index];
 		fep->tx_skbuff[index] = NULL;
 		if (!IS_TSO_HEADER(fep, bdp->bd.cbd_bufaddr))
 			fec_enet_tx_unmap(bdp, fep);
 		bdp->bd.cbd_bufaddr = 0;
-		if (!skb) {
-			bdp = fec_enet_get_nextdesc(bdp, fep);
+		if (!skb)
 			continue;
-		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1198,16 +1205,13 @@ fec_enet_tx(struct net_device *ndev)
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
 
-		fep->dirty_tx = bdp;
-
-		/* Update pointer to next buffer descriptor to be transmitted */
-		bdp = fec_enet_get_nextdesc(bdp, fep);
-	}
+		fep->tx_dirty = index;
+	} while (1);
 
 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
 
 	/* ERR006538: Keep the transmitter going */
-	if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
+	if (index != fep->tx_next && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
 		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
 	if (netif_queue_stopped(ndev) &&
@@ -2253,8 +2257,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
-	bdp = fep->tx_bd_base;
 	for (i = 0; i < fep->tx_ring_size; i++) {
+		bdp = fec_enet_tx_get(i, fep);
 		if (bdp->bd.cbd_bufaddr)
 			fec_enet_tx_unmap(bdp, fep);
 		bdp->bd.cbd_bufaddr = 0;
@@ -2304,25 +2308,23 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->bd.cbd_sc |= BD_SC_WRAP;
 
-	bdp = fep->tx_bd_base;
 	for (i = 0; i < fep->tx_ring_size; i++) {
+		bdp = fec_enet_tx_get(i, fep);
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 		if (!fep->tx_bounce[i])
 			goto err_alloc;
 
-		bdp->bd.cbd_sc = 0;
+		/* Set the last buffer to wrap. */
+		if (i == fep->tx_ring_size - 1)
+			bdp->bd.cbd_sc = BD_SC_WRAP;
+		else
+			bdp->bd.cbd_sc = 0;
 		bdp->bd.cbd_bufaddr = 0;
 
 		if (fep->bufdesc_ex)
 			bdp->ebd.cbd_esc = BD_ENET_TX_INT;
-
-		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
-	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep);
-	bdp->bd.cbd_sc |= BD_SC_WRAP;
-
 	return 0;
 
 err_alloc:
-- 
cgit