-rw-r--r--  drivers/net/ethernet/freescale/fec.h         1
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  183
2 files changed, 87 insertions(+), 97 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 4777242aab79..d9eb328559d8 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -284,6 +284,7 @@ struct fec_enet_private {
bool ptp_clk_on;
struct mutex ptp_clk_mutex;
+ unsigned char tx_page_map[TX_RING_SIZE];
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1ec5385e7863..93eeb70c644b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -351,30 +351,31 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
}

static void
-fec_enet_tx_unmap(union bufdesc_u *bdp, struct fec_enet_private *fep)
+fec_enet_tx_unmap(unsigned index, union bufdesc_u *bdp, struct fec_enet_private *fep)
{
dma_addr_t addr = bdp->bd.cbd_bufaddr;
size_t length = bdp->bd.cbd_datlen;
- dma_unmap_single(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
+ if (fep->tx_page_map[index])
+ dma_unmap_page(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
}

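Passing the ring index into fec_enet_tx_unmap() lets the teardown mirror how each buffer was mapped: fragments mapped via skb_frag_dma_map() must be released with dma_unmap_page(), while bounce buffers mapped via dma_map_single() need dma_unmap_single(). A minimal sketch of the required pairing ('dev', 'buf' and 'len' are hypothetical names, not from the patch):

/* Sketch: a DMA mapping must be torn down by the variant that created it;
 * mixing dma_map_single() with dma_unmap_page() is incorrect. */
dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (!dma_mapping_error(dev, addr)) {
	/* ... hardware consumes the buffer ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE); /* not dma_unmap_page() */
}
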
-static int
-fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+static bool
+fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev,
+ unsigned int estatus)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
union bufdesc_u *bdp;
int nr_frags = skb_shinfo(skb)->nr_frags;
- int frag, frag_len;
unsigned short status;
- unsigned int estatus = 0;
- skb_frag_t *this_frag;
- unsigned int index = fep->tx_next;
- void *bufaddr;
+ const skb_frag_t *this_frag;
+ unsigned int frag_len, index = fep->tx_next;
dma_addr_t addr;
- int i;
+ int frag, i;
for (frag = 0; frag < nr_frags; frag++) {
this_frag = &skb_shinfo(skb)->frags[frag];
@@ -382,58 +383,49 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
if (++index >= fep->tx_ring_size)
index = 0;
- bdp = fec_enet_tx_get(index, fep);
- status = bdp->bd.cbd_sc;
- status &= ~BD_ENET_TX_STATS;
- status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- frag_len = skb_shinfo(skb)->frags[frag].size;
+ frag_len = skb_frag_size(this_frag);
- /* Handle the last BD specially */
- if (frag == nr_frags - 1) {
- status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
- if (fep->flags & FEC_FLAG_BUFDESC_EX) {
- estatus |= BD_ENET_TX_INT;
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP && fep->hwts_tx_en))
- estatus |= BD_ENET_TX_TS;
- }
- }
+ /* If the alignment is unsuitable, we need to bounce. */
+ if (this_frag->page_offset & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ unsigned char *bounce = fep->tx_bounce[index];
- if (fep->flags & FEC_FLAG_BUFDESC_EX) {
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS;
- bdp->ebd.cbd_bdu = 0;
- bdp->ebd.cbd_esc = estatus;
+ memcpy(bounce, skb_frag_address(this_frag), frag_len);
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bounce, frag_len);
+
+ addr = dma_map_single(&fep->pdev->dev, bounce,
+ frag_len, DMA_TO_DEVICE);
+ fep->tx_page_map[index] = 0;
+ } else {
+ addr = skb_frag_dma_map(&fep->pdev->dev, this_frag, 0,
+ frag_len, DMA_TO_DEVICE);
+ fep->tx_page_map[index] = 1;
}
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+ if (dma_mapping_error(&fep->pdev->dev, addr))
+ goto dma_mapping_error;
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], bufaddr, frag_len);
- bufaddr = fep->tx_bounce[index];
+ status = BD_ENET_TX_TC | BD_ENET_TX_READY;
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
- swap_buffer(bufaddr, frag_len);
- }
+ /* Handle the last BD specially */
+ if (frag == nr_frags - 1)
+ status |= BD_ENET_TX_INTR | BD_ENET_TX_LAST;
- addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, addr)) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "Tx DMA memory map failed\n");
- goto dma_mapping_error;
+ bdp = fec_enet_tx_get(index, fep);
+ if (fep->flags & FEC_FLAG_BUFDESC_EX) {
+ bdp->ebd.cbd_bdu = 0;
+ bdp->ebd.cbd_esc = estatus;
}
bdp->bd.cbd_bufaddr = addr;
bdp->bd.cbd_datlen = frag_len;
- bdp->bd.cbd_sc = status;
+ bdp->bd.cbd_sc = (bdp->bd.cbd_sc & ~BD_ENET_TX_STATS) | status;
}
fep->tx_next = index;
- return 0;
+ return true;
dma_mapping_error:
index = fep->tx_next;
@@ -441,40 +433,32 @@ dma_mapping_error:
if (++index >= fep->tx_ring_size)
index = 0;
bdp = fec_enet_tx_get(index, fep);
- dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
- bdp->bd.cbd_datlen, DMA_TO_DEVICE);
+ fec_enet_tx_unmap(index, bdp, fep);
}
- return NETDEV_TX_OK;
+ return false;
}

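The rewritten fragment loop maps each page fragment in place with skb_frag_dma_map() when its offset satisfies the controller's alignment rules, and falls back to copying into the per-slot bounce buffer (mapped with dma_map_single()) when it does not or when the frame needs byte-swapping; tx_page_map[] records which path was taken so the completion side can unmap correctly. A simplified sketch of that decision, with the swap quirk, error handling and descriptor setup omitted:

/* Simplified from the patch: map in place when aligned, bounce otherwise. */
if (frag->page_offset & FEC_ALIGNMENT) {
	memcpy(bounce, skb_frag_address(frag), len);	/* copy to aligned buffer */
	addr = dma_map_single(dev, bounce, len, DMA_TO_DEVICE);
	fep->tx_page_map[index] = 0;	/* later: dma_unmap_single() */
} else {
	addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
	fep->tx_page_map[index] = 1;	/* later: dma_unmap_page() */
}
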
-static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static bool fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- int nr_frags = skb_shinfo(skb)->nr_frags;
union bufdesc_u *bdp;
void *bufaddr;
dma_addr_t addr;
unsigned short status;
- unsigned short buflen;
- unsigned int estatus = 0;
- unsigned int index;
- int ret;
+ unsigned int index, buflen, estatus;
/* Protocol checksum off-load for TCP and UDP. */
- if (fec_enet_clear_csum(skb, ndev)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- /* Fill in a Tx ring entry */
- index = fep->tx_next;
+ if (fec_enet_clear_csum(skb, ndev))
+ return false;
/* Set buffer length and buffer pointer */
bufaddr = skb->data;
buflen = skb_headlen(skb);
+ index = fep->tx_next;
+
/*
* On some FEC implementations data must be aligned on
* 4-byte boundaries. Use bounce buffers to copy data
@@ -491,34 +475,24 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
/* Push the data cache so the CPM does not get stale memory data. */
addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, addr)) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_OK;
- }
+ if (dma_mapping_error(&fep->pdev->dev, addr))
+ goto release;
bdp = fec_enet_tx_get(index, fep);
- status = bdp->bd.cbd_sc & ~BD_ENET_TX_STATS;
- if (nr_frags) {
- ret = fec_enet_txq_submit_frag_skb(skb, ndev);
- if (ret)
- return ret;
- } else {
- status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
- if (fep->flags & FEC_FLAG_BUFDESC_EX) {
- estatus = BD_ENET_TX_INT;
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP && fep->hwts_tx_en))
- estatus |= BD_ENET_TX_TS;
- }
- }
+ /* Fill in a Tx ring entry */
+ bdp->bd.cbd_datlen = buflen;
+ bdp->bd.cbd_bufaddr = addr;
+
+ fep->tx_page_map[index] = 0;
+ estatus = BD_ENET_TX_INT;
if (fep->flags & FEC_FLAG_BUFDESC_EX) {
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- fep->hwts_tx_en))
+ fep->hwts_tx_en)) {
+ estatus |= BD_ENET_TX_TS;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS;
@@ -527,13 +501,19 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->ebd.cbd_esc = estatus;
}
+ status = BD_ENET_TX_READY | BD_ENET_TX_TC;
+ if (skb_shinfo(skb)->nr_frags) {
+ if (!fec_enet_txq_submit_frag_skb(skb, ndev, estatus))
+ goto unmap;
+ } else {
+ status |= BD_ENET_TX_INTR | BD_ENET_TX_LAST;
+ }
+
index = fep->tx_next;
+
/* Save skb pointer */
fep->tx_skbuff[index] = skb;
- bdp->bd.cbd_datlen = buflen;
- bdp->bd.cbd_bufaddr = addr;
-
/*
* We need the preceding stores to the descriptor to complete
* before updating the status field, which hands it over to the
@@ -544,7 +524,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
- bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_TC;
+ bdp->bd.cbd_sc = status | (bdp->bd.cbd_sc & BD_ENET_TX_WRAP);
skb_tx_timestamp(skb);
netdev_sent_queue(ndev, skb->len);
@@ -558,7 +538,14 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
- return 0;
+ return true;
+
+ unmap:
+ fec_enet_tx_unmap(index, bdp, fep);
+ release:
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return false;
}

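The final status write is the handover point: setting BD_ENET_TX_READY in cbd_sc gives the descriptor to the hardware, so every other descriptor field must be visible first (the barrier itself sits in context this hunk does not show). Note the patch now preserves only the WRAP bit from the previous status rather than or-ing READY/TC in at the end. A generic sketch of the publish pattern, assuming a plain wmb() as the ordering primitive:

/* Generic descriptor-publish pattern (barrier choice assumed). */
bdp->bd.cbd_bufaddr = addr;	/* fill every field first */
bdp->bd.cbd_datlen  = buflen;
wmb();				/* order the stores above before READY is set */
bdp->bd.cbd_sc = status | (bdp->bd.cbd_sc & BD_ENET_TX_WRAP);
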
static netdev_tx_t
@@ -566,7 +553,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int nr_frags = skb_shinfo(skb)->nr_frags;
- int ret;
if (ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) < 1 + nr_frags) {
/* Ooops. All transmit buffers are full. Bail out.
@@ -577,9 +563,10 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- ret = fec_enet_txq_submit_skb(skb, ndev);
- if (ret)
- return ret;
+ if (!fec_enet_txq_submit_skb(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
if (ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) < fep->tx_min)
netif_stop_queue(ndev);
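
With scatter-gather enabled, a worst-case skb consumes one descriptor for the linear head plus one per fragment, which is why the final hunk sets fep->tx_min to MAX_SKB_FRAGS + 1. The queue must stop whenever fewer slots than that remain and wake once completions free enough; a sketch of that stop/wake pairing, with the wake-side check assumed rather than shown in this diff:

/* Stop/wake pairing for an SG-capable Tx ring (wake side assumed). */
if (ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) < fep->tx_min)
	netif_stop_queue(ndev);	/* no room left for a worst-case skb */

/* In the Tx completion handler: */
if (netif_queue_stopped(ndev) &&
    ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) >= fep->tx_min)
	netif_wake_queue(ndev);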
@@ -621,7 +608,7 @@ static void fec_enet_bd_init(struct net_device *dev)
else
bdp->bd.cbd_sc = 0;
if (bdp->bd.cbd_bufaddr)
- fec_enet_tx_unmap(bdp, fep);
+ fec_enet_tx_unmap(i, bdp, fep);
bdp->bd.cbd_bufaddr = 0;
if (fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -919,7 +906,7 @@ fec_enet_tx(struct net_device *ndev)
skb = fep->tx_skbuff[index];
fep->tx_skbuff[index] = NULL;
- fec_enet_tx_unmap(bdp, fep);
+ fec_enet_tx_unmap(index, bdp, fep);
if (!skb)
continue;
@@ -2077,7 +2064,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (i = 0; i < fep->tx_ring_size; i++) {
bdp = fec_enet_tx_get(i, fep);
if (bdp->bd.cbd_bufaddr)
- fec_enet_tx_unmap(bdp, fep);
+ fec_enet_tx_unmap(i, bdp, fep);
bdp->bd.cbd_bufaddr = 0;
kfree(fep->tx_bounce[i]);
fep->tx_bounce[i] = NULL;
@@ -2478,11 +2465,13 @@ static int fec_enet_init(struct net_device *ndev)
if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM | NETIF_F_SG);
+ | NETIF_F_RXCSUM);
fep->flags |= FEC_FLAG_RX_CSUM;
}
}
+ ndev->features |= NETIF_F_SG;
+
if (ndev->features & NETIF_F_SG)
fep->tx_min = MAX_SKB_FRAGS + 1;
else