author    Eric Dumazet <edumazet@google.com>  2012-10-29 07:30:49 +0000
committer David S. Miller <davem@davemloft.net>  2012-11-02 21:58:09 -0400
commit    a4d7e485bca65bd516fced77b03f92419308df72 (patch)
tree      c4d53f5194944832b04d1724f2993ac65dede67f /drivers/net/vmxnet3
parent    789336360e0a2aeb9750c16ab704a02cbe035e9e (diff)
vmxnet3: must split too big fragments
vmxnet3 has a 16 Kbyte limit per tx descriptor, which happened to work
as long as we provided PAGE_SIZE fragments.

Our stack can now build larger fragments, so we need to split them at
the 16 Kbyte boundary.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: jongman heo <jongman.heo@samsung.com>
Tested-by: jongman heo <jongman.heo@samsung.com>
Cc: Shreyas Bhatewara <sbhatewara@vmware.com>
Reviewed-by: Bhavesh Davda <bhavesh@vmware.com>
Signed-off-by: Shreyas Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
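To make the splitting scheme concrete, here is a minimal user-space C sketch of the idea the patch implements in vmxnet3_map_pkt() below. All names here are illustrative stand-ins, not driver code; the 16384 constant comes from the 2^14 limit noted in the patch:

#include <stdio.h>

#define MAX_TX_BUF_SIZE	16384u	/* stand-in for VMXNET3_MAX_TX_BUF_SIZE */

/* Chop one fragment of 'len' bytes into per-descriptor chunks. */
static void split_fragment(unsigned int len)
{
	unsigned int buf_offset = 0;

	while (len) {
		unsigned int buf_size = len < MAX_TX_BUF_SIZE ?
					len : MAX_TX_BUF_SIZE;

		/* one tx descriptor covers [buf_offset, buf_offset + buf_size) */
		printf("desc: offset=%u len=%u\n", buf_offset, buf_size);
		len -= buf_size;
		buf_offset += buf_size;
	}
}

int main(void)
{
	split_fragment(32768 + 100);	/* a 32 KB + 100 B fragment -> 3 descriptors */
	return 0;
}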
Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 65
1 file changed, 45 insertions(+), 20 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ce9d4f2c9776..0ae1bcc6da73 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -744,28 +744,43 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		u32 buf_size;
 
-		tbi = tq->buf_info + tq->tx_ring.next2fill;
-		tbi->map_type = VMXNET3_MAP_PAGE;
-		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
-						 0, skb_frag_size(frag),
-						 DMA_TO_DEVICE);
+		buf_offset = 0;
+		len = skb_frag_size(frag);
+		while (len) {
+			tbi = tq->buf_info + tq->tx_ring.next2fill;
+			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+				buf_size = len;
+				dw2 |= len;
+			} else {
+				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+				/* spec says that for TxDesc.len, 0 == 2^14 */
+			}
+			tbi->map_type = VMXNET3_MAP_PAGE;
+			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+							 buf_offset, buf_size,
+							 DMA_TO_DEVICE);
 
-		tbi->len = skb_frag_size(frag);
+			tbi->len = buf_size;
 
-		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
-		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
-		gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
-		gdesc->dword[3] = 0;
+			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+			gdesc->dword[2] = cpu_to_le32(dw2);
+			gdesc->dword[3] = 0;
 
-		dev_dbg(&adapter->netdev->dev,
-			"txd[%u]: 0x%llu %u %u\n",
-			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
-			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
-		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
-		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+			dev_dbg(&adapter->netdev->dev,
+				"txd[%u]: 0x%llu %u %u\n",
+				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
+			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+			len -= buf_size;
+			buf_offset += buf_size;
+		}
 	}
 
 	ctx->eop_txd = gdesc;
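A note on the "0 == 2^14" comment in the hunk above: the descriptor's length field cannot represent 16384 directly, so a full-size 16384-byte buffer is encoded as 0; that is why dw2 is OR-ed with len only in the len < VMXNET3_MAX_TX_BUF_SIZE branch. A minimal sketch of that encoding, assuming a 14-bit length field as the comment implies (the field width is an inference, not taken from the header):

#include <assert.h>

#define TXD_LEN_MAX	(1u << 14)	/* 16384; per the patch comment, 0 encodes 2^14 */

static unsigned int encode_txd_len(unsigned int len)
{
	assert(len >= 1 && len <= TXD_LEN_MAX);
	return len & (TXD_LEN_MAX - 1);	/* 16384 wraps to 0 */
}

int main(void)
{
	assert(encode_txd_len(100) == 100);
	assert(encode_txd_len(TXD_LEN_MAX) == 0);	/* full buffer -> 0 */
	return 0;
}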
@@ -886,6 +901,18 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
 	}
 }
 
+static int txd_estimate(const struct sk_buff *skb)
+{
+	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
+	}
+	return count;
+}
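txd_estimate() replaces the old flat "nr_frags + 1" estimate: since one fragment may now need several descriptors, each fragment's contribution must be rounded up to the 16384-byte boundary. A hedged arithmetic sketch, assuming VMXNET3_TXD_NEEDED(size) is a round-up division by VMXNET3_MAX_TX_BUF_SIZE (which is what its use here implies; txd_needed() below is a stand-in):

#include <stdio.h>

#define MAX_TX_BUF_SIZE	16384u	/* stand-in for VMXNET3_MAX_TX_BUF_SIZE */

/* stand-in for VMXNET3_TXD_NEEDED(size): descriptors for one buffer */
static unsigned int txd_needed(unsigned int size)
{
	return (size + MAX_TX_BUF_SIZE - 1) / MAX_TX_BUF_SIZE;
}

int main(void)
{
	/* e.g. a 100-byte linear part plus one 32868-byte fragment:
	 * 1 (headlen) + 1 (the "+ 1" kept from the old estimate) + 3 = 5,
	 * where the old code would have estimated only 1 + 1 + 1 = 3 */
	printf("estimated descriptors: %u\n",
	       txd_needed(100) + 1 + txd_needed(32868));
	return 0;
}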
/*
* Transmits a pkt thru a given tq
@@ -914,9 +941,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	union Vmxnet3_GenericDesc tempTxDesc;
 #endif
 
-	/* conservatively estimate # of descriptors to use */
-	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
-		skb_shinfo(skb)->nr_frags + 1;
+	count = txd_estimate(skb);
 
 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));