summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2014-06-18 23:28:09 +0100
committerRussell King <rmk+kernel@arm.linux.org.uk>2014-10-17 14:35:35 +0100
commit37ad6b09d60f1667826284e8c0096d6117fab82d (patch)
treed63bdcfcaa05e3d4fa56fa786630124efdf2bd4a
parent3981daaed7772bf25b0f96323520e53bc78bf8cd (diff)
Revert FEC commits
This reverts commit d5e42c86e07935f3cab4dcfca787a733146d0c70 ("net: fec: Don't clear IPV6 header checksum field when IP accelerator enable"). This reverts commit 79f339125ea316e910220e5f5b4ad30370f4de85 ("net: fec: Add software TSO support"), which depends on the IP header checksum commit reverted below. This reverts commit 96c50caa5148e0e0a077672574785700885c6764 ("net: fec: Enable IP header hardware checksum"), as it causes a regression with IPv6 by overwriting the location where the IPv4 header checksum would be: - 0x0000: 6000 0000 0028 0640 fd8f 7570 feb6 0001 `....(.@..up.... + 0x0000: 6000 0000 0028 0640 fd8f 0000 feb6 0001 `....(.@........
-rw-r--r--drivers/net/ethernet/freescale/fec.h6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c288
2 files changed, 24 insertions, 270 deletions
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 6f576a3cc204..4777242aab79 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -303,12 +303,6 @@ struct fec_enet_private {
unsigned short tx_ring_size;
unsigned short rx_ring_size;
- unsigned short tx_stop_threshold;
- unsigned short tx_wake_threshold;
-
- /* Software TSO */
- char *tso_hdrs;
- dma_addr_t tso_hdrs_dma;
unsigned char flags;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7f6e5a5654fd..1ec5385e7863 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -33,13 +33,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
@@ -262,15 +255,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_FLAG_RX_CSUM BIT(1)
#define FEC_FLAG_RX_VLAN BIT(2)
-#define TSO_HEADER_SIZE 128
-/* Max number of allowed TCP segments for software TSO */
-#define FEC_MAX_TSO_SEGS 100
-#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
-
-#define IS_TSO_HEADER(txq, addr) \
- ((addr >= txq->tso_hdrs_dma) && \
- (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
-
static int mii_cnt;
static union bufdesc_u *
@@ -303,10 +287,10 @@ fec_enet_rx_get(unsigned int index, struct fec_enet_private *fep)
return bdp;
}
-static unsigned int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+static unsigned ring_free(unsigned ins, unsigned rem, unsigned size)
{
- int num = fep->tx_dirty - fep->tx_next;
- return num < 0 ? num + fep->tx_ring_size : num;
+ int num = rem - ins;
+ return num < 0 ? num + size : num;
}
static void *swap_buffer(void *bufaddr, int len)
@@ -342,11 +326,6 @@ static void fec_dump(struct net_device *ndev)
}
}
-static inline bool is_ipv4_pkt(struct sk_buff *skb)
-{
- return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
-}
-
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
@@ -366,8 +345,6 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(skb_cow_head(skb, 0)))
return -1;
- if (is_ipv4_pkt(skb))
- ip_hdr(skb)->check = 0;
*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
return 0;
@@ -424,7 +401,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
if (fep->flags & FEC_FLAG_BUFDESC_EX) {
if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ estatus |= BD_ENET_TX_PINS;
bdp->ebd.cbd_bdu = 0;
bdp->ebd.cbd_esc = estatus;
}
@@ -485,13 +462,6 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
unsigned int index;
int ret;
- if (fec_enet_get_free_txdesc_num(fep) < fep->tx_min) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "NOT enough BD for SG!\n");
- return NETDEV_TX_OK;
- }
-
/* Protocol checksum off-load for TCP and UDP. */
if (fec_enet_clear_csum(skb, ndev)) {
dev_kfree_skb_any(skb);
@@ -551,7 +521,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ estatus |= BD_ENET_TX_PINS;
bdp->ebd.cbd_bdu = 0;
bdp->ebd.cbd_esc = estatus;
@@ -591,213 +561,27 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-static int
-fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
- union bufdesc_u *bdp, int index, char *data,
- int size, bool last_tcp, bool is_last)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
- unsigned short status;
- unsigned int estatus = 0;
- dma_addr_t addr;
-
- status = bdp->bd.cbd_sc;
- status &= ~BD_ENET_TX_STATS;
-
- status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-
- if (((unsigned long) data) & FEC_ALIGNMENT ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], data, size);
- data = fep->tx_bounce[index];
-
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
- swap_buffer(data, size);
- }
-
- addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, addr)) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
- }
-
- bdp->bd.cbd_datlen = size;
- bdp->bd.cbd_bufaddr = addr;
-
- if (fep->flags & FEC_FLAG_BUFDESC_EX) {
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
- bdp->ebd.cbd_bdu = 0;
- bdp->ebd.cbd_esc = estatus;
- }
-
- /* Handle the last BD specially */
- if (last_tcp)
- status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
- if (is_last) {
- status |= BD_ENET_TX_INTR;
- if (fep->flags & FEC_FLAG_BUFDESC_EX)
- bdp->ebd.cbd_esc |= BD_ENET_TX_INT;
- }
-
- bdp->bd.cbd_sc = status;
-
- return 0;
-}
-
-static int
-fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
- union bufdesc_u *bdp, int index)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- void *bufaddr;
- unsigned long dmabuf;
- unsigned short status;
- unsigned int estatus = 0;
-
- status = bdp->bd.cbd_sc;
- status &= ~BD_ENET_TX_STATS;
- status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-
- bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
- dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], skb->data, hdr_len);
- bufaddr = fep->tx_bounce[index];
-
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
- swap_buffer(bufaddr, hdr_len);
-
- dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
- hdr_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
- }
- }
-
- bdp->bd.cbd_bufaddr = dmabuf;
- bdp->bd.cbd_datlen = hdr_len;
-
- if (fep->flags & FEC_FLAG_BUFDESC_EX) {
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
- bdp->ebd.cbd_bdu = 0;
- bdp->ebd.cbd_esc = estatus;
- }
-
- bdp->bd.cbd_sc = status;
-
- return 0;
-}
-
-static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- int total_len, data_left;
- struct tso_t tso;
- unsigned int index = fep->tx_next;
- unsigned int last_index = index;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
int ret;
- if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
- dev_kfree_skb_any(skb);
+ if (ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) < 1 + nr_frags) {
+ /* Ooops. All transmit buffers are full. Bail out.
+ * This should not happen, since ndev->tbusy should be set.
+ */
if (net_ratelimit())
- netdev_err(ndev, "NOT enough BD for TSO!\n");
- return NETDEV_TX_OK;
- }
-
- /* Protocol checksum off-load for TCP and UDP. */
- if (fec_enet_clear_csum(skb, ndev)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- /* Initialize the TSO handler, and prepare the first payload */
- tso_start(skb, &tso);
-
- total_len = skb->len - hdr_len;
- while (total_len > 0) {
- union bufdesc_u *bdp;
- char *hdr;
-
- bdp = fec_enet_tx_get(index, fep);
- data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
- total_len -= data_left;
-
- /* prepare packet headers: MAC + IP + TCP */
- hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
- tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
- if (ret)
- goto err_release;
-
- while (data_left > 0) {
- int size;
-
- if (++index >= fep->tx_ring_size)
- index = 0;
-
- size = min_t(int, tso.size, data_left);
- bdp = fec_enet_tx_get(index, fep);
- ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
- size, size == data_left,
- total_len == 0);
- if (ret)
- goto err_release;
-
- data_left -= size;
- tso_build_data(skb, &tso, size);
- }
-
- last_index = index;
- if (++index >= fep->tx_ring_size)
- index = 0;
+ netdev_err(ndev, "tx queue full!\n");
+ return NETDEV_TX_BUSY;
}
- /* Save skb pointer */
- fep->tx_skbuff[last_index] = skb;
-
- skb_tx_timestamp(skb);
- netdev_sent_queue(ndev, skb->len);
- fep->tx_next = index;
-
- /* Trigger transmission start */
- if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
- return 0;
-
-err_release:
- /* TODO: Release all used data descriptors for TSO */
- return ret;
-}
-
-static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- int ret;
-
- if (skb_is_gso(skb))
- ret = fec_enet_txq_submit_tso(skb, ndev);
- else
- ret = fec_enet_txq_submit_skb(skb, ndev);
+ ret = fec_enet_txq_submit_skb(skb, ndev);
if (ret)
return ret;
- if (fec_enet_get_free_txdesc_num(fep) < fep->tx_stop_threshold)
+ if (ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) < fep->tx_min)
netif_stop_queue(ndev);
return NETDEV_TX_OK;
@@ -1135,9 +919,7 @@ fec_enet_tx(struct net_device *ndev)
skb = fep->tx_skbuff[index];
fep->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(fep, bdp->bd.cbd_bufaddr))
- fec_enet_tx_unmap(bdp, fep);
- bdp->bd.cbd_bufaddr = 0;
+ fec_enet_tx_unmap(bdp, fep);
if (!skb)
continue;
@@ -1191,7 +973,8 @@ fec_enet_tx(struct net_device *ndev)
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
if (netif_queue_stopped(ndev) &&
- fec_enet_get_free_txdesc_num(fep) >= fep->tx_wake_threshold)
+ ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) >=
+ fep->tx_min)
netif_wake_queue(ndev);
}
@@ -2591,15 +2374,10 @@ static int fec_set_features(struct net_device *netdev,
}
/* Set the appropriate minimum transmit ring free threshold */
- if (features & NETIF_F_SG) {
+ if (features & NETIF_F_SG)
fep->tx_min = MAX_SKB_FRAGS + 1;
- fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
- } else {
+ else
fep->tx_min = 1;
- fep->tx_stop_threshold = 1;
- fep->tx_wake_threshold = 1;
- }
/* Resume the device after updates */
if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
@@ -2647,9 +2425,6 @@ static int fec_enet_init(struct net_device *ndev)
fep->tx_ring_size = TX_RING_SIZE;
fep->rx_ring_size = RX_RING_SIZE;
- fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
-
bd_size = fep->tx_ring_size + fep->rx_ring_size;
if (fep->flags & FEC_FLAG_BUFDESC_EX)
bd_size *= sizeof(struct bufdesc_ex);
@@ -2661,13 +2436,6 @@ static int fec_enet_init(struct net_device *ndev)
if (!cbd_base)
return -ENOMEM;
- fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
- &fep->tso_hdrs_dma, GFP_KERNEL);
- if (!fep->tso_hdrs) {
- dma_free_coherent(NULL, bd_size, cbd_base, cbd_dma);
- return -ENOMEM;
- }
-
memset(cbd_base, 0, PAGE_SIZE);
fep->netdev = ndev;
@@ -2708,25 +2476,17 @@ static int fec_enet_init(struct net_device *ndev)
}
if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
- ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
-
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM | NETIF_F_SG |
- NETIF_F_TSO);
+ | NETIF_F_RXCSUM | NETIF_F_SG);
fep->flags |= FEC_FLAG_RX_CSUM;
}
}
- if (ndev->features & NETIF_F_SG) {
+ if (ndev->features & NETIF_F_SG)
fep->tx_min = MAX_SKB_FRAGS + 1;
- fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
- } else {
+ else
fep->tx_min = 1;
- fep->tx_stop_threshold = 1;
- fep->tx_wake_threshold = 1;
- }
ndev->hw_features = ndev->features;