author     lipeng <lipeng321@huawei.com>          2017-04-01 12:03:37 +0100
committer  David S. Miller <davem@davemloft.net>  2017-04-03 14:48:43 -0700
commit     b29bd41259f38fc1a22735cd69b374a75d6a213c (patch)
tree       329a6d43f806f4168e4433ee67353a0e6770384b  /drivers/net/ethernet/hisilicon/hns/hnae.h
parent     36eedfde1a3602e8054c16bc295c47764647a208 (diff)
net: hns: Fix to adjust buf_size of ring according to mtu
Because the buf_size of the ring is set to 2048, the rx_poll_one path can reuse the page, which improves XGE performance. But the chip only supports three BDs per packet, so with a buf_size of 2048 the maximum MTU is 6K. To get better performance at smaller MTUs, the buf_size needs to be adjusted according to the MTU.

When the user changes the MTU, hns only changes the descriptors in memory. Some descriptors have already been fetched by the chip and cannot be changed from the code, so the driver puts the port into loopback mode and sends some packets to make the chip consume the stale descriptors and fetch new ones.

Because the Pv660 does not support RSS indirection, a version check is added to the MTU-change path.

Signed-off-by: lipeng <lipeng321@huawei.com>
Reviewed-by: Yisen Zhuang <yisen.zhuang@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
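The sizing rule described above (at most three BDs per packet, so a 2048-byte buffer caps the usable MTU at roughly 6K) can be expressed as a small helper. The sketch below only illustrates that reasoning; the function name hns_nic_select_rx_buf_size and the 4096-byte fallback are assumptions for this example, not code from the patch.

/* Illustrative sketch only: choose an RX buffer size for a given MTU.
 * With at most three BDs per packet, a 2048-byte buffer covers MTUs up
 * to BD_SIZE_2048_MAX_MTU (6000); larger MTUs need a larger buffer.
 * The helper name and the 4096-byte fallback are assumptions made for
 * this example, not taken from the patch.
 */
static int hns_nic_select_rx_buf_size(int mtu)
{
	if (mtu <= BD_SIZE_2048_MAX_MTU)
		return 2048;

	return 4096;
}

Keeping the 2048-byte buffer whenever the MTU allows it preserves the page-reuse optimization in rx_poll_one mentioned above.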
Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns/hnae.h')
-rw-r--r--   drivers/net/ethernet/hisilicon/hns/hnae.h   37
1 file changed, 37 insertions(+), 0 deletions(-)
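For context, the two inline helpers added below (hnae_reinit_all_ring_desc() and hnae_reinit_all_ring_page_off()) are intended to be called from the MTU-change path once the chip has consumed its stale descriptors via the loopback trick described in the commit message. A rough, hypothetical call sequence is sketched here; apart from the two helpers, every name and step is an assumption for illustration only.

/* Hypothetical MTU-change sequence; only the two hnae_reinit_* helpers
 * come from this patch, everything else is illustrative.
 */
static int example_set_mtu(struct hnae_handle *h, int new_mtu)
{
	/* 1. stop the queues and flush descriptors already fetched by the
	 *    chip, e.g. by sending packets in port-loopback mode
	 */
	/* 2. update each ring's buf_size according to new_mtu */
	/* 3. point every RX descriptor back at its buffer */
	hnae_reinit_all_ring_desc(h);
	/* 4. reset page offsets so buffer reuse starts from offset 0 again */
	hnae_reinit_all_ring_page_off(h);
	/* 5. restart the queues */
	return 0;
}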
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 8016854796fb..c66581db72ac 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -67,6 +67,8 @@ do { \
#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
#define AE_NAME_SIZE 16
+#define BD_SIZE_2048_MAX_MTU 6000
+
/* some said the RX and TX RCB format should not be the same in the future. But
* it is the same now...
*/
@@ -646,6 +648,41 @@ static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
+/* when the buffer size is reinitialized, the buffer descriptors must be reinitialized too */
+static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++)
+ ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+/* when the buffer size is reinitialized, the page offsets must be reset too */
+static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++) {
+ ring->desc_cb[j].page_offset = 0;
+ if (ring->desc[j].addr !=
+ cpu_to_le64(ring->desc_cb[j].dma))
+ ring->desc[j].addr =
+ cpu_to_le64(ring->desc_cb[j].dma);
+ }
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
#define hnae_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \