author     Lendacky, Thomas <Thomas.Lendacky@amd.com>    2015-07-06 11:57:37 -0500
committer  David S. Miller <davem@davemloft.net>          2015-07-08 15:57:14 -0700
commit     cfbfd86bfde15020bccde377e11586ee5c8b701d (patch)
tree       6b129aeb7b38ea566f1548463ec32a7a9550dcf5 /drivers/net/ethernet/amd/xgbe/xgbe-drv.c
parent     95ec655bc465ccb2a3329d4aff9a45e3c8188db5 (diff)
amd-xgbe: Fix DMA API debug warning
When running a kernel configured with CONFIG_DMA_API_DEBUG=y a warning is issued:

  DMA-API: device driver tries to sync DMA memory it has not allocated

This warning is the result of mapping the full range of the Rx buffer pages allocated and then performing a dma_sync_single_for_cpu against a calculated DMA address. The proper thing to do is to use dma_sync_single_range_for_cpu with a base DMA address and an offset.

Reported-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
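For context, the sketch below shows the usage pattern the fix switches to: the page is mapped once with dma_map_page(), and partial syncs then reference that mapping by its base address plus an offset via dma_sync_single_range_for_cpu(), instead of passing a computed address to dma_sync_single_for_cpu(). This is illustrative only, not the xgbe driver's code; the struct and helper names (rx_buf, rx_buf_map, rx_buf_sync_for_cpu) are hypothetical, though the field names mirror those in the diff.

/*
 * Illustrative sketch only -- not the xgbe driver's code. The struct is
 * hypothetical; the field names (dma_base, dma_off, dma_len) follow the diff.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct rx_buf {
	struct page *pages;	/* backing page for Rx data */
	dma_addr_t dma_base;	/* base DMA address of the whole mapping */
	unsigned long dma_off;	/* offset of this buffer within the mapping */
	size_t dma_len;		/* length of this buffer */
};

static int rx_buf_map(struct device *dev, struct rx_buf *buf,
		      unsigned long off, size_t len)
{
	/* Map the full page once; dma_base is the only address the DMA API knows about. */
	buf->dma_base = dma_map_page(dev, buf->pages, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, buf->dma_base))
		return -ENOMEM;

	buf->dma_off = off;
	buf->dma_len = len;
	return 0;
}

static void rx_buf_sync_for_cpu(struct device *dev, struct rx_buf *buf)
{
	/*
	 * Sync just the sub-range by base + offset. Calling
	 * dma_sync_single_for_cpu(dev, buf->dma_base + buf->dma_off, ...)
	 * would pass an address the DMA API never handed out, which is
	 * exactly what CONFIG_DMA_API_DEBUG flags.
	 */
	dma_sync_single_range_for_cpu(dev, buf->dma_base, buf->dma_off,
				      buf->dma_len, DMA_FROM_DEVICE);
}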
Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-drv.c')
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c  17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e9c28d19ef8..aae9d5ecd182 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
/* Start with the header buffer which may contain just the header
* or the header plus data
*/
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+ rdata->rx.hdr.dma_off,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
len -= copy_len;
if (len) {
/* Add the remaining data as a frag */
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
- rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
if (!skb)
error = 1;
} else if (rdesc_len) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.buf.dma,
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);