author    Eric Dumazet <edumazet@google.com>    2017-03-08 08:17:06 -0800
committer David S. Miller <davem@davemloft.net>    2017-03-09 09:54:46 -0800
commit    69ba943151b2e40e201700cf5b3a94e433c6fd83
tree      4b33088842a174f4454c57770a0cc6d6b9388f92 /drivers/net/ethernet/mellanox/mlx4/en_rx.c
parent    3c66d1c7ed4131bbaea128803cfa247defa2c376
mlx4: dma_dir is a mlx4_en_priv attribute
No need to duplicate it for all queues and frags.
num_frags & log_rx_info become u8 to save space.
u8 accesses are a bit faster than u16 anyway.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
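
For context, the title refers to a companion change in mlx4_en.h that this en_rx.c-limited diffstat does not show. A minimal sketch of the implied layout, with field order, neighboring members, and the prior u16 widths assumed from the commit message rather than quoted from the header:

/*
 * Illustrative sketch only: the real mlx4_en.h hunk is outside this
 * diffstat, so surrounding fields and exact placement are assumptions.
 */
struct mlx4_en_frag_info {
	u16 frag_size;
	u32 frag_stride;
	/* enum dma_data_direction dma_dir; -- dropped; hoisted into priv */
	u16 rx_headroom;
	/* ... */
};

struct mlx4_en_priv {
	/* ... */
	u8 num_frags;	/* was wider (u16 per the message); u8 saves space */
	u8 log_rx_info;	/* likewise shrunk to u8 */
	u8 dma_dir;	/* one DMA direction shared by all RX queues/frags */
	/* ... */
};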
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_rx.c')
-rw-r--r--    drivers/net/ethernet/mellanox/mlx4/en_rx.c    16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 867292880c07..6183128b2d3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -72,7 +72,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 		return -ENOMEM;
 	}
 	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
-			   frag_info->dma_dir);
+			   priv->dma_dir);
 	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
 		put_page(page);
 		return -ENOMEM;
@@ -128,7 +128,7 @@ out:
 		if (page_alloc[i].page != ring_alloc[i].page) {
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
 				       page_alloc[i].page_size,
-				       priv->frag_info[i].dma_dir);
+				       priv->dma_dir);
 			page = page_alloc[i].page;
 			/* Revert changes done by mlx4_alloc_pages */
 			page_ref_sub(page, page_alloc[i].page_size /
@@ -149,7 +149,7 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
 
 	if (next_frag_end > frags[i].page_size)
 		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
-			       frag_info->dma_dir);
+			       priv->dma_dir);
 
 	if (frags[i].page)
 		put_page(frags[i].page);
@@ -181,7 +181,7 @@ out:
 		page_alloc = &ring->page_alloc[i];
 		dma_unmap_page(priv->ddev, page_alloc->dma,
 			       page_alloc->page_size,
-			       priv->frag_info[i].dma_dir);
+			       priv->dma_dir);
 		page = page_alloc->page;
 		/* Revert changes done by mlx4_alloc_pages */
 		page_ref_sub(page, page_alloc->page_size /
@@ -206,7 +206,7 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 		       i, page_count(page_alloc->page));
 
 		dma_unmap_page(priv->ddev, page_alloc->dma,
-			       page_alloc->page_size, frag_info->dma_dir);
+			       page_alloc->page_size, priv->dma_dir);
 		while (page_alloc->page_offset + frag_info->frag_stride <
 		       page_alloc->page_size) {
 			put_page(page_alloc->page);
@@ -570,7 +570,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 		struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
 
 		dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
-			       priv->frag_info[0].dma_dir);
+			       priv->dma_dir);
 		put_page(frame->page);
 	}
 	ring->page_cache.index = 0;
@@ -1202,7 +1202,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 		 * expense of more costly truesize accounting
 		 */
 		priv->frag_info[0].frag_stride = PAGE_SIZE;
-		priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
+		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
 		i = 1;
 	} else {
@@ -1217,11 +1217,11 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 			priv->frag_info[i].frag_stride =
 				ALIGN(priv->frag_info[i].frag_size,
 				      SMP_CACHE_BYTES);
-			priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
 			priv->frag_info[i].rx_headroom = 0;
 			buf_size += priv->frag_info[i].frag_size;
 			i++;
 		}
+		priv->dma_dir = PCI_DMA_FROMDEVICE;
 	}
 
 	priv->num_frags = i;
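
The two mlx4_en_calc_rx_buf() hunks show why a single per-device field suffices: the driver picks PCI_DMA_BIDIRECTIONAL when the netdev runs XDP (the program may write the page) and PCI_DMA_FROMDEVICE otherwise, a choice that depends only on the device's mode, never on the individual frag. The space claim in the message can also be sanity-checked in isolation; a minimal userspace sketch with hypothetical struct names, not driver code:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical before/after layouts mirroring the u16 -> u8 shrink. */
struct fields_before {
	uint16_t num_frags;
	uint16_t log_rx_info;
};

struct fields_after {
	uint8_t num_frags;
	uint8_t log_rx_info;
	uint8_t dma_dir;	/* hoisted here from per-frag storage */
};

int main(void)
{
	/* Three u8 fields pack tighter than two u16 fields (3 vs 4 bytes),
	 * on top of dropping one enum per frag_info entry. */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct fields_before), sizeof(struct fields_after));
	return 0;
}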