Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3_enet.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  3530
1 file changed, 2476 insertions(+), 1054 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b14f2abc2425..7a0654e2d3dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -8,18 +8,23 @@ #include <linux/cpu_rmap.h> #endif #include <linux/if_vlan.h> +#include <linux/irq.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/iommu.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/aer.h> #include <linux/skbuff.h> #include <linux/sctp.h> #include <net/gre.h> +#include <net/gro.h> #include <net/ip6_checksum.h> +#include <net/page_pool/helpers.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/tcp.h> #include <net/vxlan.h> +#include <net/geneve.h> #include "hnae3.h" #include "hns3_enet.h" @@ -30,7 +35,7 @@ #define CREATE_TRACE_POINTS #include "hns3_trace.h" -#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) +#define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift)) #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) #define hns3_rl_err(fmt, ...) \ @@ -51,6 +56,18 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Network interface message level setting"); +static unsigned int tx_sgl = 1; +module_param(tx_sgl, uint, 0600); +MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); + +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0400); + +#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ + sizeof(struct sg_table)) +#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ + dma_get_cache_alignment()) + #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) @@ -58,6 +75,7 @@ MODULE_PARM_DESC(debug, " Network interface message level setting"); #define HNS3_OUTER_VLAN_TAG 2 #define HNS3_MIN_TX_LEN 33U +#define HNS3_MIN_TUN_PKT_LEN 65U /* hns3_pci_tbl - PCI Device ID Table * @@ -79,19 +97,296 @@ static const struct pci_device_id hns3_pci_tbl[] = { HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, /* required last entry */ - {0, } + {0,} }; MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); +#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \ + { ptype, \ + l, \ + CHECKSUM_##s, \ + HNS3_L3_TYPE_##t, \ + 1, \ + h} + +#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \ + { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \ + PKT_HASH_TYPE_NONE } + +static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { + HNS3_RX_PTYPE_UNUSED_ENTRY(0), + HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, 
PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_UNUSED_ENTRY(9), + HNS3_RX_PTYPE_UNUSED_ENTRY(10), + HNS3_RX_PTYPE_UNUSED_ENTRY(11), + HNS3_RX_PTYPE_UNUSED_ENTRY(12), + HNS3_RX_PTYPE_UNUSED_ENTRY(13), + HNS3_RX_PTYPE_UNUSED_ENTRY(14), + HNS3_RX_PTYPE_UNUSED_ENTRY(15), + HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_UNUSED_ENTRY(26), + HNS3_RX_PTYPE_UNUSED_ENTRY(27), + HNS3_RX_PTYPE_UNUSED_ENTRY(28), + HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(38), + HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(46), + HNS3_RX_PTYPE_UNUSED_ENTRY(47), + HNS3_RX_PTYPE_UNUSED_ENTRY(48), + HNS3_RX_PTYPE_UNUSED_ENTRY(49), + HNS3_RX_PTYPE_UNUSED_ENTRY(50), + HNS3_RX_PTYPE_UNUSED_ENTRY(51), + HNS3_RX_PTYPE_UNUSED_ENTRY(52), + HNS3_RX_PTYPE_UNUSED_ENTRY(53), + HNS3_RX_PTYPE_UNUSED_ENTRY(54), + HNS3_RX_PTYPE_UNUSED_ENTRY(55), + HNS3_RX_PTYPE_UNUSED_ENTRY(56), + HNS3_RX_PTYPE_UNUSED_ENTRY(57), + HNS3_RX_PTYPE_UNUSED_ENTRY(58), + HNS3_RX_PTYPE_UNUSED_ENTRY(59), + HNS3_RX_PTYPE_UNUSED_ENTRY(60), + HNS3_RX_PTYPE_UNUSED_ENTRY(61), + HNS3_RX_PTYPE_UNUSED_ENTRY(62), + HNS3_RX_PTYPE_UNUSED_ENTRY(63), + HNS3_RX_PTYPE_UNUSED_ENTRY(64), + HNS3_RX_PTYPE_UNUSED_ENTRY(65), + HNS3_RX_PTYPE_UNUSED_ENTRY(66), + HNS3_RX_PTYPE_UNUSED_ENTRY(67), + HNS3_RX_PTYPE_UNUSED_ENTRY(68), + HNS3_RX_PTYPE_UNUSED_ENTRY(69), + HNS3_RX_PTYPE_UNUSED_ENTRY(70), + HNS3_RX_PTYPE_UNUSED_ENTRY(71), + HNS3_RX_PTYPE_UNUSED_ENTRY(72), + HNS3_RX_PTYPE_UNUSED_ENTRY(73), + HNS3_RX_PTYPE_UNUSED_ENTRY(74), + HNS3_RX_PTYPE_UNUSED_ENTRY(75), + HNS3_RX_PTYPE_UNUSED_ENTRY(76), + HNS3_RX_PTYPE_UNUSED_ENTRY(77), + HNS3_RX_PTYPE_UNUSED_ENTRY(78), + HNS3_RX_PTYPE_UNUSED_ENTRY(79), + HNS3_RX_PTYPE_UNUSED_ENTRY(80), + HNS3_RX_PTYPE_UNUSED_ENTRY(81), + HNS3_RX_PTYPE_UNUSED_ENTRY(82), + HNS3_RX_PTYPE_UNUSED_ENTRY(83), + HNS3_RX_PTYPE_UNUSED_ENTRY(84), + HNS3_RX_PTYPE_UNUSED_ENTRY(85), + HNS3_RX_PTYPE_UNUSED_ENTRY(86), + HNS3_RX_PTYPE_UNUSED_ENTRY(87), + 
HNS3_RX_PTYPE_UNUSED_ENTRY(88), + HNS3_RX_PTYPE_UNUSED_ENTRY(89), + HNS3_RX_PTYPE_UNUSED_ENTRY(90), + HNS3_RX_PTYPE_UNUSED_ENTRY(91), + HNS3_RX_PTYPE_UNUSED_ENTRY(92), + HNS3_RX_PTYPE_UNUSED_ENTRY(93), + HNS3_RX_PTYPE_UNUSED_ENTRY(94), + HNS3_RX_PTYPE_UNUSED_ENTRY(95), + HNS3_RX_PTYPE_UNUSED_ENTRY(96), + HNS3_RX_PTYPE_UNUSED_ENTRY(97), + HNS3_RX_PTYPE_UNUSED_ENTRY(98), + HNS3_RX_PTYPE_UNUSED_ENTRY(99), + HNS3_RX_PTYPE_UNUSED_ENTRY(100), + HNS3_RX_PTYPE_UNUSED_ENTRY(101), + HNS3_RX_PTYPE_UNUSED_ENTRY(102), + HNS3_RX_PTYPE_UNUSED_ENTRY(103), + HNS3_RX_PTYPE_UNUSED_ENTRY(104), + HNS3_RX_PTYPE_UNUSED_ENTRY(105), + HNS3_RX_PTYPE_UNUSED_ENTRY(106), + HNS3_RX_PTYPE_UNUSED_ENTRY(107), + HNS3_RX_PTYPE_UNUSED_ENTRY(108), + HNS3_RX_PTYPE_UNUSED_ENTRY(109), + HNS3_RX_PTYPE_UNUSED_ENTRY(110), + HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_UNUSED_ENTRY(120), + HNS3_RX_PTYPE_UNUSED_ENTRY(121), + HNS3_RX_PTYPE_UNUSED_ENTRY(122), + HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(132), + HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(140), + HNS3_RX_PTYPE_UNUSED_ENTRY(141), + HNS3_RX_PTYPE_UNUSED_ENTRY(142), + HNS3_RX_PTYPE_UNUSED_ENTRY(143), + HNS3_RX_PTYPE_UNUSED_ENTRY(144), + HNS3_RX_PTYPE_UNUSED_ENTRY(145), + HNS3_RX_PTYPE_UNUSED_ENTRY(146), + HNS3_RX_PTYPE_UNUSED_ENTRY(147), + HNS3_RX_PTYPE_UNUSED_ENTRY(148), + HNS3_RX_PTYPE_UNUSED_ENTRY(149), + HNS3_RX_PTYPE_UNUSED_ENTRY(150), + HNS3_RX_PTYPE_UNUSED_ENTRY(151), + HNS3_RX_PTYPE_UNUSED_ENTRY(152), + HNS3_RX_PTYPE_UNUSED_ENTRY(153), + HNS3_RX_PTYPE_UNUSED_ENTRY(154), + HNS3_RX_PTYPE_UNUSED_ENTRY(155), + HNS3_RX_PTYPE_UNUSED_ENTRY(156), + HNS3_RX_PTYPE_UNUSED_ENTRY(157), + HNS3_RX_PTYPE_UNUSED_ENTRY(158), + HNS3_RX_PTYPE_UNUSED_ENTRY(159), + HNS3_RX_PTYPE_UNUSED_ENTRY(160), + HNS3_RX_PTYPE_UNUSED_ENTRY(161), + HNS3_RX_PTYPE_UNUSED_ENTRY(162), + HNS3_RX_PTYPE_UNUSED_ENTRY(163), + HNS3_RX_PTYPE_UNUSED_ENTRY(164), + HNS3_RX_PTYPE_UNUSED_ENTRY(165), + HNS3_RX_PTYPE_UNUSED_ENTRY(166), + 
HNS3_RX_PTYPE_UNUSED_ENTRY(167), + HNS3_RX_PTYPE_UNUSED_ENTRY(168), + HNS3_RX_PTYPE_UNUSED_ENTRY(169), + HNS3_RX_PTYPE_UNUSED_ENTRY(170), + HNS3_RX_PTYPE_UNUSED_ENTRY(171), + HNS3_RX_PTYPE_UNUSED_ENTRY(172), + HNS3_RX_PTYPE_UNUSED_ENTRY(173), + HNS3_RX_PTYPE_UNUSED_ENTRY(174), + HNS3_RX_PTYPE_UNUSED_ENTRY(175), + HNS3_RX_PTYPE_UNUSED_ENTRY(176), + HNS3_RX_PTYPE_UNUSED_ENTRY(177), + HNS3_RX_PTYPE_UNUSED_ENTRY(178), + HNS3_RX_PTYPE_UNUSED_ENTRY(179), + HNS3_RX_PTYPE_UNUSED_ENTRY(180), + HNS3_RX_PTYPE_UNUSED_ENTRY(181), + HNS3_RX_PTYPE_UNUSED_ENTRY(182), + HNS3_RX_PTYPE_UNUSED_ENTRY(183), + HNS3_RX_PTYPE_UNUSED_ENTRY(184), + HNS3_RX_PTYPE_UNUSED_ENTRY(185), + HNS3_RX_PTYPE_UNUSED_ENTRY(186), + HNS3_RX_PTYPE_UNUSED_ENTRY(187), + HNS3_RX_PTYPE_UNUSED_ENTRY(188), + HNS3_RX_PTYPE_UNUSED_ENTRY(189), + HNS3_RX_PTYPE_UNUSED_ENTRY(190), + HNS3_RX_PTYPE_UNUSED_ENTRY(191), + HNS3_RX_PTYPE_UNUSED_ENTRY(192), + HNS3_RX_PTYPE_UNUSED_ENTRY(193), + HNS3_RX_PTYPE_UNUSED_ENTRY(194), + HNS3_RX_PTYPE_UNUSED_ENTRY(195), + HNS3_RX_PTYPE_UNUSED_ENTRY(196), + HNS3_RX_PTYPE_UNUSED_ENTRY(197), + HNS3_RX_PTYPE_UNUSED_ENTRY(198), + HNS3_RX_PTYPE_UNUSED_ENTRY(199), + HNS3_RX_PTYPE_UNUSED_ENTRY(200), + HNS3_RX_PTYPE_UNUSED_ENTRY(201), + HNS3_RX_PTYPE_UNUSED_ENTRY(202), + HNS3_RX_PTYPE_UNUSED_ENTRY(203), + HNS3_RX_PTYPE_UNUSED_ENTRY(204), + HNS3_RX_PTYPE_UNUSED_ENTRY(205), + HNS3_RX_PTYPE_UNUSED_ENTRY(206), + HNS3_RX_PTYPE_UNUSED_ENTRY(207), + HNS3_RX_PTYPE_UNUSED_ENTRY(208), + HNS3_RX_PTYPE_UNUSED_ENTRY(209), + HNS3_RX_PTYPE_UNUSED_ENTRY(210), + HNS3_RX_PTYPE_UNUSED_ENTRY(211), + HNS3_RX_PTYPE_UNUSED_ENTRY(212), + HNS3_RX_PTYPE_UNUSED_ENTRY(213), + HNS3_RX_PTYPE_UNUSED_ENTRY(214), + HNS3_RX_PTYPE_UNUSED_ENTRY(215), + HNS3_RX_PTYPE_UNUSED_ENTRY(216), + HNS3_RX_PTYPE_UNUSED_ENTRY(217), + HNS3_RX_PTYPE_UNUSED_ENTRY(218), + HNS3_RX_PTYPE_UNUSED_ENTRY(219), + HNS3_RX_PTYPE_UNUSED_ENTRY(220), + HNS3_RX_PTYPE_UNUSED_ENTRY(221), + HNS3_RX_PTYPE_UNUSED_ENTRY(222), + HNS3_RX_PTYPE_UNUSED_ENTRY(223), + HNS3_RX_PTYPE_UNUSED_ENTRY(224), + HNS3_RX_PTYPE_UNUSED_ENTRY(225), + HNS3_RX_PTYPE_UNUSED_ENTRY(226), + HNS3_RX_PTYPE_UNUSED_ENTRY(227), + HNS3_RX_PTYPE_UNUSED_ENTRY(228), + HNS3_RX_PTYPE_UNUSED_ENTRY(229), + HNS3_RX_PTYPE_UNUSED_ENTRY(230), + HNS3_RX_PTYPE_UNUSED_ENTRY(231), + HNS3_RX_PTYPE_UNUSED_ENTRY(232), + HNS3_RX_PTYPE_UNUSED_ENTRY(233), + HNS3_RX_PTYPE_UNUSED_ENTRY(234), + HNS3_RX_PTYPE_UNUSED_ENTRY(235), + HNS3_RX_PTYPE_UNUSED_ENTRY(236), + HNS3_RX_PTYPE_UNUSED_ENTRY(237), + HNS3_RX_PTYPE_UNUSED_ENTRY(238), + HNS3_RX_PTYPE_UNUSED_ENTRY(239), + HNS3_RX_PTYPE_UNUSED_ENTRY(240), + HNS3_RX_PTYPE_UNUSED_ENTRY(241), + HNS3_RX_PTYPE_UNUSED_ENTRY(242), + HNS3_RX_PTYPE_UNUSED_ENTRY(243), + HNS3_RX_PTYPE_UNUSED_ENTRY(244), + HNS3_RX_PTYPE_UNUSED_ENTRY(245), + HNS3_RX_PTYPE_UNUSED_ENTRY(246), + HNS3_RX_PTYPE_UNUSED_ENTRY(247), + HNS3_RX_PTYPE_UNUSED_ENTRY(248), + HNS3_RX_PTYPE_UNUSED_ENTRY(249), + HNS3_RX_PTYPE_UNUSED_ENTRY(250), + HNS3_RX_PTYPE_UNUSED_ENTRY(251), + HNS3_RX_PTYPE_UNUSED_ENTRY(252), + HNS3_RX_PTYPE_UNUSED_ENTRY(253), + HNS3_RX_PTYPE_UNUSED_ENTRY(254), + HNS3_RX_PTYPE_UNUSED_ENTRY(255), +}; + +#define HNS3_INVALID_PTYPE \ + ARRAY_SIZE(hns3_rx_ptype_tbl) + static irqreturn_t hns3_irq_handle(int irq, void *vector) { struct hns3_enet_tqp_vector *tqp_vector = vector; napi_schedule_irqoff(&tqp_vector->napi); + tqp_vector->event_cnt++; return IRQ_HANDLED; } @@ -154,6 +449,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + 
irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, tqp_vectors->name, tqp_vectors); if (ret) { @@ -163,8 +459,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) return ret; } - disable_irq(tqp_vectors->vector_irq); - irq_set_affinity_hint(tqp_vectors->vector_irq, &tqp_vectors->affinity_mask); @@ -180,22 +474,18 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, writel(mask_en, tqp_vector->mask_addr); } -static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector) { napi_enable(&tqp_vector->napi); enable_irq(tqp_vector->vector_irq); - - /* enable vector */ - hns3_mask_vector_irq(tqp_vector, 1); } -static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector) { - /* disable vector */ - hns3_mask_vector_irq(tqp_vector, 0); - disable_irq(tqp_vector->vector_irq); napi_disable(&tqp_vector->napi); + cancel_work_sync(&tqp_vector->rx_group.dim.work); + cancel_work_sync(&tqp_vector->tx_group.dim.work); } void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, @@ -207,9 +497,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, * Rl defines rate of interrupts i.e. number of interrupts-per-second * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing */ - - if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && - !tqp_vector->rx_group.coal.gl_adapt_enable) + if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && + !tqp_vector->rx_group.coal.adapt_enable) /* According to the hardware, the range of rl_reg is * 0-59 and the unit is 4. */ @@ -221,76 +510,117 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, u32 gl_value) { - u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); + u32 new_val; + + if (tqp_vector->rx_group.coal.unit_1us) + new_val = gl_value | HNS3_INT_GL_1US; + else + new_val = hns3_gl_usec_to_reg(gl_value); - writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); } void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, u32 gl_value) { - u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); + u32 new_val; + + if (tqp_vector->tx_group.coal.unit_1us) + new_val = gl_value | HNS3_INT_GL_1US; + else + new_val = hns3_gl_usec_to_reg(gl_value); - writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); } -static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, - struct hns3_nic_priv *priv) +void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value) { - /* initialize the configuration for interrupt coalescing. - * 1. GL (Interrupt Gap Limiter) - * 2. 
RL (Interrupt Rate Limiter) - * - * Default: enable interrupt coalescing self-adaptive and GL - */ - tqp_vector->tx_group.coal.gl_adapt_enable = 1; - tqp_vector->rx_group.coal.gl_adapt_enable = 1; - - tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; - tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); +} - tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; - tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; +void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value) +{ + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); } -static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, +static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector, struct hns3_nic_priv *priv) { + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; + struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; + + tx_coal->adapt_enable = ptx_coal->adapt_enable; + rx_coal->adapt_enable = prx_coal->adapt_enable; + + tx_coal->int_gl = ptx_coal->int_gl; + rx_coal->int_gl = prx_coal->int_gl; + + rx_coal->flow_level = prx_coal->flow_level; + tx_coal->flow_level = ptx_coal->flow_level; + + /* device version above V3(include V3), GL can configure 1us + * unit, so uses 1us unit. + */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + tx_coal->unit_1us = 1; + rx_coal->unit_1us = 1; + } + + if (ae_dev->dev_specs.int_ql_max) { + tx_coal->ql_enable = 1; + rx_coal->ql_enable = 1; + tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; + rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; + tx_coal->int_ql = ptx_coal->int_ql; + rx_coal->int_ql = prx_coal->int_ql; + } +} + +static void +hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) +{ + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; struct hnae3_handle *h = priv->ae_handle; - hns3_set_vector_coalesce_tx_gl(tqp_vector, - tqp_vector->tx_group.coal.int_gl); - hns3_set_vector_coalesce_rx_gl(tqp_vector, - tqp_vector->rx_group.coal.int_gl); + hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); + hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); + + if (tx_coal->ql_enable) + hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); + + if (rx_coal->ql_enable) + hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); } static int hns3_nic_set_real_num_queue(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + unsigned int queue_size = kinfo->num_tqps; int i, ret; - if (kinfo->num_tc <= 1) { + if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { netdev_reset_tc(netdev); } else { - ret = netdev_set_num_tc(netdev, kinfo->num_tc); + ret = netdev_set_num_tc(netdev, tc_info->num_tc); if (ret) { netdev_err(netdev, "netdev_set_num_tc fail, ret=%d!\n", ret); return ret; } - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - 
kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } + for (i = 0; i < tc_info->num_tc; i++) + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], + tc_info->tqp_offset[i]); } ret = netif_set_real_num_tx_queues(netdev, queue_size); @@ -310,12 +640,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) return 0; } -static u16 hns3_get_max_available_channels(struct hnae3_handle *h) +u16 hns3_get_max_available_channels(struct hnae3_handle *h) { u16 alloc_tqps, max_rss_size, rss_size; h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); - rss_size = alloc_tqps / h->kinfo.num_tc; + rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; return min_t(u16, rss_size, max_rss_size); } @@ -372,11 +702,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev) return 0; } +static void hns3_enable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_enable(&priv->tqp_vector[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 1); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_enable(h->kinfo.tqp[i]); +} + +static void hns3_disable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 0); + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_disable(&priv->tqp_vector[i]); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - int i, j; int ret; ret = hns3_nic_reset_all_ring(h); @@ -385,23 +746,13 @@ static int hns3_nic_net_up(struct net_device *netdev) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - /* enable the vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - /* enable rcb */ - for (j = 0; j < h->kinfo.num_tqps; j++) - hns3_tqp_enable(h->kinfo.tqp[j]); + hns3_enable_irqs_and_tqps(netdev); /* start the ae_dev */ ret = h->ae_algo->ops->start ? 
h->ae_algo->ops->start(h) : 0; if (ret) { set_bit(HNS3_NIC_STATE_DOWN, &priv->state); - while (j--) - hns3_tqp_disable(h->kinfo.tqp[j]); - - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); + hns3_disable_irqs_and_tqps(netdev); } return ret; @@ -440,6 +791,11 @@ static int hns3_nic_net_open(struct net_device *netdev) if (hns3_nic_resetting(netdev)) return -EBUSY; + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netdev_warn(netdev, "net open repeatedly!\n"); + return 0; + } + netif_carrier_off(netdev); ret = hns3_nic_set_real_num_queue(netdev); @@ -454,7 +810,7 @@ static int hns3_nic_net_open(struct net_device *netdev) kinfo = &h->kinfo; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) - netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); + netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); @@ -483,17 +839,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h) static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; - int i; - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - /* disable rcb */ - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(netdev); /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; @@ -523,8 +871,8 @@ static int hns3_nic_net_stop(struct net_device *netdev) if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, false); - netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + netif_tx_disable(netdev); hns3_nic_net_down(netdev); @@ -586,13 +934,10 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev) { u8 flags = 0; - if (netdev->flags & IFF_PROMISC) { + if (netdev->flags & IFF_PROMISC) flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; - } else { - flags |= HNAE3_VLAN_FLTR; - if (netdev->flags & IFF_ALLMULTI) - flags |= HNAE3_USER_MPE; - } + else if (netdev->flags & IFF_ALLMULTI) + flags = HNAE3_USER_MPE; return flags; } @@ -616,45 +961,219 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) void hns3_request_update_promisc_mode(struct hnae3_handle *handle) { - const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + const struct hnae3_ae_ops *ops = hns3_get_ops(handle); if (ops->request_update_promisc_mode) ops->request_update_promisc_mode(handle); } -int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) +static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hns3_tx_spare *tx_spare = ring->tx_spare; + u32 ntc, ntu; + + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_tx_spare_update() called in tx desc cleaning process. + */ + ntc = smp_load_acquire(&tx_spare->last_to_clean); + ntu = tx_spare->next_to_use; + + if (ntc > ntu) + return ntc - ntu - 1; + + /* The free tx buffer is divided into two part, so pick the + * larger one. 
+ */ + return max(ntc, tx_spare->len - ntu) - 1; +} + +static void hns3_tx_spare_update(struct hns3_enet_ring *ring) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + + if (!tx_spare || + tx_spare->last_to_clean == tx_spare->next_to_clean) + return; + + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_tx_spare_space() called in xmit process. + */ + smp_store_release(&tx_spare->last_to_clean, + tx_spare->next_to_clean); +} + +static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, + struct sk_buff *skb, + u32 space) +{ + u32 len = skb->len <= ring->tx_copybreak ? skb->len : + skb_headlen(skb); + + if (len > ring->tx_copybreak) + return false; - if (h->ae_algo->ops->set_promisc_mode) { - return h->ae_algo->ops->set_promisc_mode(h, - promisc_flags & HNAE3_UPE, - promisc_flags & HNAE3_MPE); + if (ALIGN(len, dma_get_cache_alignment()) > space) { + hns3_ring_stats_update(ring, tx_spare_full); + return false; } - return 0; + return true; +} + +static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, + struct sk_buff *skb, + u32 space) +{ + if (skb->len <= ring->tx_copybreak || !tx_sgl || + (!skb_has_frag_list(skb) && + skb_shinfo(skb)->nr_frags < tx_sgl)) + return false; + + if (space < HNS3_MAX_SGL_SIZE) { + hns3_ring_stats_update(ring, tx_spare_full); + return false; + } + + return true; } -void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) +static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { + u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; + struct net_device *netdev = ring_to_netdev(ring); struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - bool last_state; - - if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { - last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; - if (enable != last_state) { - netdev_info(netdev, - "%s vlan filter\n", - enable ? "enable" : "disable"); - h->ae_algo->ops->enable_vlan_filter(h, enable); + struct hns3_tx_spare *tx_spare; + struct page *page; + dma_addr_t dma; + int order; + + if (!alloc_size) + return; + + order = get_order(alloc_size); + if (order > MAX_PAGE_ORDER) { + if (net_ratelimit()) + dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n"); + return; + } + + tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), + GFP_KERNEL); + if (!tx_spare) { + /* The driver still work without the tx spare buffer */ + dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n"); + goto devm_kzalloc_error; + } + + page = alloc_pages_node(dev_to_node(ring_to_dev(ring)), + GFP_KERNEL, order); + if (!page) { + dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n"); + goto alloc_pages_error; + } + + dma = dma_map_page(ring_to_dev(ring), page, 0, + PAGE_SIZE << order, DMA_TO_DEVICE); + if (dma_mapping_error(ring_to_dev(ring), dma)) { + dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n"); + goto dma_mapping_error; + } + + tx_spare->dma = dma; + tx_spare->buf = page_address(page); + tx_spare->len = PAGE_SIZE << order; + ring->tx_spare = tx_spare; + ring->tx_copybreak = priv->tx_copybreak; + return; + +dma_mapping_error: + put_page(page); +alloc_pages_error: + devm_kfree(ring_to_dev(ring), tx_spare); +devm_kzalloc_error: + ring->tqp->handle->kinfo.tx_spare_buf_size = 0; +} + +/* Use hns3_tx_spare_space() to make sure there is enough buffer + * before calling below function to allocate tx buffer. 
+ */ +static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring, + unsigned int size, dma_addr_t *dma, + u32 *cb_len) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + u32 ntu = tx_spare->next_to_use; + + size = ALIGN(size, dma_get_cache_alignment()); + *cb_len = size; + + /* Tx spare buffer wraps back here because the end of + * freed tx buffer is not enough. + */ + if (ntu + size > tx_spare->len) { + *cb_len += (tx_spare->len - ntu); + ntu = 0; + } + + tx_spare->next_to_use = ntu + size; + if (tx_spare->next_to_use == tx_spare->len) + tx_spare->next_to_use = 0; + + *dma = tx_spare->dma + ntu; + + return tx_spare->buf + ntu; +} + +static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + + if (len > tx_spare->next_to_use) { + len -= tx_spare->next_to_use; + tx_spare->next_to_use = tx_spare->len - len; + } else { + tx_spare->next_to_use -= len; + } +} + +static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + u32 ntc = tx_spare->next_to_clean; + u32 len = cb->length; + + tx_spare->next_to_clean += len; + + if (tx_spare->next_to_clean >= tx_spare->len) { + tx_spare->next_to_clean -= tx_spare->len; + + if (tx_spare->next_to_clean) { + ntc = 0; + len = tx_spare->next_to_clean; } } + + /* This tx spare buffer is only really reclaimed after calling + * hns3_tx_spare_update(), so it is still safe to use the info in + * the tx buffer to do the dma sync or sg unmapping after + * tx_spare->next_to_clean is moved forword. + */ + if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { + dma_addr_t dma = tx_spare->dma + ntc; + + dma_sync_single_for_cpu(ring_to_dev(ring), dma, len, + DMA_TO_DEVICE); + } else { + struct sg_table *sgt = tx_spare->buf + ntc; + + dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, + DMA_TO_DEVICE); + } } -static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, - u16 *mss, u32 *type_cs_vlan_tso) +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, + u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) { u32 l4_offset, hdr_len; union l3_hdr_info l3; @@ -683,15 +1202,6 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, SKB_GSO_GRE_CSUM | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if ((!(skb_shinfo(skb)->gso_type & - SKB_GSO_PARTIAL)) && - (skb_shinfo(skb)->gso_type & - SKB_GSO_UDP_TUNNEL_CSUM)) { - /* Software should clear the udp's checksum - * field when tso is needed. 
- */ - l4.udp->check = 0; - } /* reset l3&l4 pointers from outer to inner headers */ l3.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); @@ -705,17 +1215,30 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* normal or tunnel packet */ l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff << 2) + l4_offset; /* remove payload length from inner pseudo checksum when tso */ l4_paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, - (__force __wsum)htonl(l4_paylen)); + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(l4_paylen)); + } else { + hdr_len = (l4.tcp->doff << 2) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + } + + *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; /* find the txbd field values */ - *paylen = skb->len - hdr_len; + *paylen_fdop_ol4cs = skb->len - hdr_len; hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); + /* offload outer UDP header checksum */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) + hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); + /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -780,20 +1303,28 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the * hardware will not do the checksum offload when udp dest port is - * 4789. + * 4789, 4790 or 6081. */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { + struct hns3_nic_priv *priv = netdev_priv(skb->dev); + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); union l4_hdr_info l4; + /* device version above V3(include V3), the hardware can + * do this checksum offload. + */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + return false; + l4.hdr = skb_transport_header(skb); if (!(!skb->encapsulation && - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT) || + l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) return false; - skb_checksum_help(skb); - return true; } @@ -831,7 +1362,6 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM); - } else if (skb->protocol == htons(ETH_P_IPV6)) { hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); @@ -845,45 +1375,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, HNS3_TUN_NVGRE); } -static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) +static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3, + u32 *type_cs_vlan_tso) { - unsigned char *l2_hdr = skb->data; - u32 l4_proto = ol4_proto; - union l4_hdr_info l4; - union l3_hdr_info l3; - u32 l2_len, l3_len; - - l4.hdr = skb_transport_header(skb); - l3.hdr = skb_network_header(skb); - - /* handle encapsulation skb */ - if (skb->encapsulation) { - /* If this is a not UDP/GRE encapsulation skb */ - if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. 
- */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - skb_checksum_help(skb); - return 0; - } - - hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); - - /* switch to inner header */ - l2_hdr = skb_inner_mac_header(skb); - l3.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - l4_proto = il4_proto; - } - if (l3.v4->version == 4) { hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, HNS3_L3T_IPV4); @@ -897,15 +1391,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, HNS3_L3T_IPV6); } +} - /* compute inner(/normal) L2 header size, defined in 2 Bytes */ - l2_len = l3.hdr - l2_hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); - +static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4, + u32 l4_proto, u32 *type_cs_vlan_tso) +{ /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: @@ -916,8 +1406,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, l4.tcp->doff); break; case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - break; + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + + return ret ? ret : skb_checksum_help(skb); + } hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, @@ -942,17 +1435,68 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, /* the stack computes the IP header already, * driver calculate l4 checksum when not TSO. */ - skb_checksum_help(skb); - return 0; + return skb_checksum_help(skb); } return 0; } +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + unsigned char *l2_hdr = skb->data; + u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + + l4.hdr = skb_transport_header(skb); + l3.hdr = skb_network_header(skb); + + /* handle encapsulation skb */ + if (skb->encapsulation) { + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. 
+ */ + return skb_checksum_help(skb); + } + + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + l4_proto = il4_proto; + } + + hns3_set_l3_type(skb, l3, type_cs_vlan_tso); + + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso); +} + static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, struct sk_buff *skb) { struct hnae3_handle *handle = tx_ring->tqp->handle; + struct hnae3_ae_dev *ae_dev; struct vlan_ethhdr *vhdr; int rc; @@ -960,10 +1504,13 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, skb_vlan_tag_present(skb))) return 0; - /* Since HW limitation, if port based insert VLAN enabled, only one VLAN - * header is allowed in skb, otherwise it will cause RAS error. + /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert + * VLAN enabled, only one VLAN header is allowed in skb, otherwise it + * will cause RAS error. */ + ae_dev = hns3_get_ae_dev(handle); if (unlikely(skb_vlan_tagged_multi(skb) && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE)) return -EINVAL; @@ -997,7 +1544,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, if (unlikely(rc < 0)) return rc; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK); @@ -1005,125 +1552,152 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, return 0; } -static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, struct hns3_desc *desc) -{ - u32 ol_type_vlan_len_msec = 0; - u32 type_cs_vlan_tso = 0; - u32 paylen = skb->len; - u16 inner_vtag = 0; - u16 out_vtag = 0; - u16 mss = 0; +/* check if the hardware is capable of checksum offloading */ +static bool hns3_check_hw_tx_csum(struct sk_buff *skb) +{ + struct hns3_nic_priv *priv = netdev_priv(skb->dev); + + /* Kindly note, due to backward compatibility of the TX descriptor, + * HW checksum of the non-IP packets and GSO packets is handled at + * different place in the following code + */ + if (skb_csum_is_sctp(skb) || skb_is_gso(skb) || + !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) + return false; + + return true; +} + +struct hns3_desc_param { + u32 paylen_ol4cs; + u32 ol_type_vlan_len_msec; + u32 type_cs_vlan_tso; + u16 mss_hw_csum; + u16 inner_vtag; + u16 out_vtag; +}; + +static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) +{ + pa->paylen_ol4cs = skb->len; + pa->ol_type_vlan_len_msec = 0; + pa->type_cs_vlan_tso = 0; + pa->mss_hw_csum = 0; + pa->inner_vtag = 0; + pa->out_vtag = 0; +} + +static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_param *param) +{ int ret; ret = hns3_handle_vtags(ring, skb); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_vlan_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_vlan_err); return ret; } else if (ret == HNS3_INNER_VLAN_TAG) { - inner_vtag = 
skb_vlan_tag_get(skb); - inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->inner_vtag = skb_vlan_tag_get(skb); + param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); } else if (ret == HNS3_OUTER_VLAN_TAG) { - out_vtag = skb_vlan_tag_get(skb); - out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->out_vtag = skb_vlan_tag_get(skb); + param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1); } + return 0; +} - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 ol4_proto, il4_proto; - - skb_reset_mac_len(skb); +static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + struct hns3_desc_param *param) +{ + u8 ol4_proto, il4_proto; + int ret; - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l4_proto_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + if (hns3_check_hw_tx_csum(skb)) { + /* set checksum start and offset, defined in 2 Bytes */ + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, + skb_checksum_start_offset(skb) >> 1); + hns3_set_field(param->ol_type_vlan_len_msec, + HNS3_TXD_CSUM_OFFSET_S, + skb->csum_offset >> 1); + param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); + return 0; + } - ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l2l3l4_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + skb_reset_mac_len(skb); - ret = hns3_set_tso(skb, &paylen, &mss, - &type_cs_vlan_tso); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_tso_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l4_proto_err); + return ret; } - /* Set txbd */ - desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le32(paylen); - desc->tx.mss = cpu_to_le16(mss); - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + ¶m->type_cs_vlan_tso, + ¶m->ol_type_vlan_len_msec); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l2l3l4_err); + return ret; + } + ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum, + ¶m->type_cs_vlan_tso, &desc_cb->send_bytes); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_tso_err); + return ret; + } return 0; } -static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - unsigned int size, enum hns_desc_type type) +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) { -#define HNS3_LIKELY_BD_NUM 1 + struct hns3_desc_param param; + int ret; - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; - struct hns3_desc *desc = &ring->desc[ring->next_to_use]; - struct device *dev = ring_to_dev(ring); - skb_frag_t *frag; - unsigned int frag_buf_num; - int k, sizeoflast; - dma_addr_t dma; + 
hns3_init_desc_data(skb, ¶m); + ret = hns3_handle_vlan_info(ring, skb, ¶m); + if (unlikely(ret < 0)) + return ret; - if (type == DESC_TYPE_SKB) { - struct sk_buff *skb = (struct sk_buff *)priv; - int ret; + desc_cb->send_bytes = skb->len; - ret = hns3_fill_skb_desc(ring, skb, desc); - if (unlikely(ret < 0)) + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = hns3_handle_csum_partial(ring, skb, desc_cb, ¶m); + if (ret) return ret; + } - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - } else if (type == DESC_TYPE_FRAGLIST_SKB) { - struct sk_buff *skb = (struct sk_buff *)priv; + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(param.ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); + desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); + desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); + desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - } else { - frag = (skb_frag_t *)priv; - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - } + return 0; +} - if (unlikely(dma_mapping_error(dev, dma))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - return -ENOMEM; - } +static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma, + unsigned int size) +{ +#define HNS3_LIKELY_BD_NUM 1 - desc_cb->length = size; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + unsigned int frag_buf_num, k; + int sizeoflast; if (likely(size <= HNS3_MAX_BD_SIZE)) { - desc_cb->priv = priv; - desc_cb->dma = dma; - desc_cb->type = type; desc->addr = cpu_to_le64(dma); desc->tx.send_size = cpu_to_le16(size); desc->tx.bdtp_fe_sc_vld_ra_ri = @@ -1135,18 +1709,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, } frag_buf_num = hns3_tx_bd_count(size); - sizeoflast = size & HNS3_TX_LAST_SIZE_M; + sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; - desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB || - type == DESC_TYPE_SKB) && !k) ? - type : DESC_TYPE_PAGE; - /* now, fill the descriptor */ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? @@ -1158,13 +1725,56 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use); - desc_cb = &ring->desc_cb[ring->next_to_use]; desc = &ring->desc[ring->next_to_use]; } return frag_buf_num; } +static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, + unsigned int type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct device *dev = ring_to_dev(ring); + unsigned int size; + dma_addr_t dma; + + if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) { + struct sk_buff *skb = (struct sk_buff *)priv; + + size = skb_headlen(skb); + if (!size) + return 0; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + } else if (type & DESC_TYPE_BOUNCE_HEAD) { + /* Head data has been filled in hns3_handle_tx_bounce(), + * just return 0 here. 
+ */ + return 0; + } else { + skb_frag_t *frag = (skb_frag_t *)priv; + + size = skb_frag_size(frag); + if (!size) + return 0; + + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } + + if (unlikely(dma_mapping_error(dev, dma))) { + hns3_ring_stats_update(ring, sw_err_cnt); + return -ENOMEM; + } + + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + return hns3_fill_desc(ring, dma, size); +} + static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, unsigned int bd_num) { @@ -1208,30 +1818,30 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, return bd_num; } -static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) +static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, + u8 max_non_tso_bd_num, unsigned int bd_num, + unsigned int recursion_level) { +#define HNS3_MAX_RECURSION_LEVEL 24 + struct sk_buff *frag_skb; - unsigned int bd_num = 0; /* If the total len is within the max bd limit */ - if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && - skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM)) + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && + !skb_has_frag_list(skb) && + skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) return skb_shinfo(skb)->nr_frags + 1U; - /* The below case will always be linearized, return - * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. - */ - if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || - (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE))) - return HNS3_MAX_TSO_BD_NUM + 1U; + if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) + return UINT_MAX; bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); - if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) return bd_num; skb_walk_frags(skb, frag_skb) { - bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); + bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, + bd_num, recursion_level + 1); if (bd_num > HNS3_MAX_TSO_BD_NUM) return bd_num; } @@ -1242,36 +1852,39 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) { if (!skb->encapsulation) - return skb_transport_offset(skb) + tcp_hdrlen(skb); + return skb_tcp_all_headers(skb); - return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + return skb_inner_tcp_all_headers(skb); } -/* HW need every continuous 8 buffer data to be larger than MSS, - * we simplify it by ensuring skb_headlen + the first continuous - * 7 frags to to be larger than gso header len + mss, and the remaining - * continuous 7 frags to be larger than MSS except the last 7 frags. +/* HW need every continuous max_non_tso_bd_num buffer data to be larger + * than MSS, we simplify it by ensuring skb_headlen + the first continuous + * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss, + * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger + * than MSS except the last max_non_tso_bd_num - 1 frags. 
*/ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, - unsigned int bd_num) + unsigned int bd_num, u8 max_non_tso_bd_num) { unsigned int tot_len = 0; - int i; + unsigned int i; - for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++) + for (i = 0; i < max_non_tso_bd_num - 1U; i++) tot_len += bd_size[i]; - /* ensure the first 8 frags is greater than mss + header */ - if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] < + /* ensure the first max_non_tso_bd_num frags is greater than + * mss + header + */ + if (tot_len + bd_size[max_non_tso_bd_num - 1U] < skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) return true; - /* ensure every continuous 7 buffer is greater than mss - * except the last one. + /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater + * than mss except the last one. */ - for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) { + for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { tot_len -= bd_size[i]; - tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U]; + tot_len += bd_size[i + max_non_tso_bd_num - 1U]; if (tot_len < skb_shinfo(skb)->gso_size) return true; @@ -1282,42 +1895,65 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) { - int i = 0; + u32 i; for (i = 0; i < MAX_SKB_FRAGS; i++) size[i] = skb_frag_size(&shinfo->frags[i]); } +static int hns3_skb_linearize(struct hns3_enet_ring *ring, + struct sk_buff *skb, + unsigned int bd_num) +{ + /* 'bd_num == UINT_MAX' means the skb' fraglist has a + * recursion level of over HNS3_MAX_RECURSION_LEVEL. + */ + if (bd_num == UINT_MAX) { + hns3_ring_stats_update(ring, over_max_recursion); + return -ENOMEM; + } + + /* The skb->len has exceeded the hw limitation, linearization + * will not help. 
+ */ + if (skb->len > HNS3_MAX_TSO_SIZE || + (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { + hns3_ring_stats_update(ring, hw_limitation); + return -ENOMEM; + } + + if (__skb_linearize(skb)) { + hns3_ring_stats_update(ring, sw_err_cnt); + return -ENOMEM; + } + + return 0; +} + static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct net_device *netdev, struct sk_buff *skb) { struct hns3_nic_priv *priv = netdev_priv(netdev); + u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; unsigned int bd_num; - bd_num = hns3_tx_bd_num(skb, bd_size); - if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { + bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); + if (unlikely(bd_num > max_non_tso_bd_num)) { if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && - !hns3_skb_need_linearized(skb, bd_size, bd_num)) { - trace_hns3_over_8bd(skb); + !hns3_skb_need_linearized(skb, bd_size, bd_num, + max_non_tso_bd_num)) { + trace_hns3_over_max_bd(skb); goto out; } - if (__skb_linearize(skb)) + if (hns3_skb_linearize(ring, skb, bd_num)) return -ENOMEM; bd_num = hns3_tx_bd_count(skb->len); - if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || - (!skb_is_gso(skb) && - bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { - trace_hns3_over_8bd(skb); - return -ENOMEM; - } - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_copy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_copy); } out: @@ -1337,6 +1973,8 @@ out: return bd_num; } + hns3_ring_stats_update(ring, tx_busy); + return -EBUSY; } @@ -1346,6 +1984,11 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + struct hns3_desc_cb *desc_cb; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; @@ -1353,69 +1996,329 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) /* rollback one */ ring_ptr_move_bw(ring, next_to_use); + desc_cb = &ring->desc_cb[ring->next_to_use]; + + if (!desc_cb->dma) + continue; + /* unmap the descriptor dma address */ - if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || - ring->desc_cb[ring->next_to_use].type == - DESC_TYPE_FRAGLIST_SKB) - dma_unmap_single(dev, - ring->desc_cb[ring->next_to_use].dma, - ring->desc_cb[ring->next_to_use].length, - DMA_TO_DEVICE); - else if (ring->desc_cb[ring->next_to_use].length) - dma_unmap_page(dev, - ring->desc_cb[ring->next_to_use].dma, - ring->desc_cb[ring->next_to_use].length, + if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) + dma_unmap_single(dev, desc_cb->dma, desc_cb->length, + DMA_TO_DEVICE); + else if (desc_cb->type & + (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) + hns3_tx_spare_rollback(ring, desc_cb->length); + else if (desc_cb->length) + dma_unmap_page(dev, desc_cb->dma, desc_cb->length, DMA_TO_DEVICE); - ring->desc_cb[ring->next_to_use].length = 0; - ring->desc_cb[ring->next_to_use].dma = 0; + desc_cb->length = 0; + desc_cb->dma = 0; + desc_cb->type = DESC_TYPE_UNKNOWN; } } static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, enum hns_desc_type type) + struct sk_buff *skb, unsigned int type) { - unsigned int size = skb_headlen(skb); + struct sk_buff *frag_skb; int i, ret, bd_num = 0; - if (size) { - ret = hns3_fill_desc(ring, skb, size, type); + ret = hns3_map_and_fill_desc(ring, skb, type); + 
if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE); if (unlikely(ret < 0)) return ret; bd_num += ret; } - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + skb_walk_frags(skb, frag_skb) { + ret = hns3_fill_skb_to_desc(ring, frag_skb, + DESC_TYPE_FRAGLIST_SKB); + if (unlikely(ret < 0)) + return ret; - size = skb_frag_size(frag); - if (!size) - continue; + bd_num += ret; + } + + return bd_num; +} + +static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num) +{ +#define HNS3_BYTES_PER_64BIT 8 + + struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {}; + int offset = 0; + + /* make sure everything is visible to device before + * excuting tx push or updating doorbell + */ + dma_wmb(); + + do { + int idx = (ring->next_to_use - num + ring->desc_num) % + ring->desc_num; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_push++; + u64_stats_update_end(&ring->syncp); + memcpy(&desc[offset], &ring->desc[idx], + sizeof(struct hns3_desc)); + offset++; + } while (--num); + + __iowrite64_copy(ring->tqp->mem_base, desc, + (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) / + HNS3_BYTES_PER_64BIT); +} + +static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring) +{ +#define HNS3_MEM_DOORBELL_OFFSET 64 + + __le64 bd_num = cpu_to_le64((u64)ring->pending_buf); + + /* make sure everything is visible to device before + * excuting tx push or updating doorbell + */ + dma_wmb(); + + __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET, + &bd_num, 1); + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_mem_doorbell += ring->pending_buf; + u64_stats_update_end(&ring->syncp); +} + +static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, + bool doorbell) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + + /* when tx push is enabled, the packet whose number of BD below + * HNS3_MAX_PUSH_BD_NUM can be pushed directly. + */ + if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && + !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit + * is updated. + */ + smp_store_release(&ring->last_to_use, ring->next_to_use); + hns3_tx_push_bd(ring, num); + return; + } + + ring->pending_buf += num; + + if (!doorbell) { + hns3_ring_stats_update(ring, tx_more); + return; + } + + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated. 
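/* Illustrative sketch (assumed descriptor layout): the tx-push idea above
 * copies a small number of fully built descriptors straight into a
 * memory-mapped push window with __iowrite64_copy(), after a dma_wmb() so
 * the device can never observe a half-written BD. struct ex_desc and
 * ex_tx_push() are placeholders, not the hns3 format.
 */
#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct ex_desc {
	__le64 addr;
	__le64 opts;
};

static void ex_tx_push(void __iomem *push_base, const struct ex_desc *desc,
		       int num)
{
	/* Order the descriptor stores in host memory before the MMIO copy. */
	dma_wmb();

	/* __iowrite64_copy() takes a count in 64-bit words. */
	__iowrite64_copy(push_base, desc, num * sizeof(*desc) / sizeof(u64));
}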
+ */ + smp_store_release(&ring->last_to_use, ring->next_to_use); + + if (ring->tqp->mem_base) + hns3_tx_mem_doorbell(ring); + else + writel(ring->pending_buf, + ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + + ring->pending_buf = 0; +} + +static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb, + struct hns3_desc *desc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!(h->ae_algo->ops->set_tx_hwts_info && + h->ae_algo->ops->set_tx_hwts_info(h, skb))) + return; + + desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); +} + +static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + unsigned int type = DESC_TYPE_BOUNCE_HEAD; + unsigned int size = skb_headlen(skb); + dma_addr_t dma; + int bd_num = 0; + u32 cb_len; + void *buf; + int ret; + + if (skb->len <= ring->tx_copybreak) { + size = skb->len; + type = DESC_TYPE_BOUNCE_ALL; + } + + /* hns3_can_use_tx_bounce() is called to ensure the below + * function can always return the tx buffer. + */ + buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len); + + ret = skb_copy_bits(skb, 0, buf, size); + if (unlikely(ret < 0)) { + hns3_tx_spare_rollback(ring, cb_len); + hns3_ring_stats_update(ring, copy_bits_err); + return ret; + } + + desc_cb->priv = skb; + desc_cb->length = cb_len; + desc_cb->dma = dma; + desc_cb->type = type; - ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); + bd_num += hns3_fill_desc(ring, dma, size); + + if (type == DESC_TYPE_BOUNCE_HEAD) { + ret = hns3_fill_skb_to_desc(ring, skb, + DESC_TYPE_BOUNCE_HEAD); if (unlikely(ret < 0)) return ret; bd_num += ret; } + dma_sync_single_for_device(ring_to_dev(ring), dma, size, + DMA_TO_DEVICE); + + hns3_ring_stats_update(ring, tx_bounce); + return bd_num; } +static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + u32 nfrag = skb_shinfo(skb)->nr_frags + 1; + struct sg_table *sgt; + int bd_num = 0; + dma_addr_t dma; + u32 cb_len, i; + int nents; + + if (skb_has_frag_list(skb)) + nfrag = HNS3_MAX_TSO_BD_NUM; + + /* hns3_can_use_tx_sgl() is called to ensure the below + * function can always return the tx buffer. 
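/* Illustrative sketch (assumed names): the TX bounce path above copies a
 * small packet into a driver-owned spare buffer that was mapped once at
 * ring setup, so transmit needs only a per-packet DMA sync instead of a
 * per-fragment dma_map_single(). ex_tx_bounce() is a placeholder.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int ex_tx_bounce(struct device *dev, struct sk_buff *skb,
			void *bounce_va, dma_addr_t bounce_dma,
			unsigned int len)
{
	/* Gather the first @len bytes of the (possibly fragmented) skb. */
	int ret = skb_copy_bits(skb, 0, bounce_va, len);

	if (ret < 0)
		return ret;

	/* Publish the copy to the device; the area is already mapped. */
	dma_sync_single_for_device(dev, bounce_dma, len, DMA_TO_DEVICE);
	return 0;
}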
+ */ + sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag), + &dma, &cb_len); + + /* scatterlist follows by the sg table */ + sgt->sgl = (struct scatterlist *)(sgt + 1); + sg_init_table(sgt->sgl, nfrag); + nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); + if (unlikely(nents < 0)) { + hns3_tx_spare_rollback(ring, cb_len); + hns3_ring_stats_update(ring, skb2sgl_err); + return -ENOMEM; + } + + sgt->orig_nents = nents; + sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, + DMA_TO_DEVICE); + if (unlikely(!sgt->nents)) { + hns3_tx_spare_rollback(ring, cb_len); + hns3_ring_stats_update(ring, map_sg_err); + return -ENOMEM; + } + + desc_cb->priv = skb; + desc_cb->length = cb_len; + desc_cb->dma = dma; + desc_cb->type = DESC_TYPE_SGL_SKB; + + for (i = 0; i < sgt->nents; i++) + bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), + sg_dma_len(sgt->sgl + i)); + hns3_ring_stats_update(ring, tx_sgl); + + return bd_num; +} + +static int hns3_handle_desc_filling(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + u32 space; + + if (!ring->tx_spare) + goto out; + + space = hns3_tx_spare_space(ring); + + if (hns3_can_use_tx_sgl(ring, skb, space)) + return hns3_handle_tx_sgl(ring, skb); + + if (hns3_can_use_tx_bounce(ring, skb, space)) + return hns3_handle_tx_bounce(ring, skb); + +out: + return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); +} + +static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + int next_to_use_head) +{ + int ret; + + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], + desc_cb); + if (unlikely(ret < 0)) + goto fill_err; + + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. 
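/* Illustrative sketch (assumed names): the SGL path above turns a
 * multi-fragment skb into a scatterlist and maps it with one dma_map_sg()
 * call, which lets an IOMMU merge the mappings. The caller must provide a
 * scatterlist with room for the linear head plus every fragment.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

static int ex_map_skb_sgl(struct device *dev, struct sk_buff *skb,
			  struct scatterlist *sgl, unsigned int max_ents)
{
	int nents;

	sg_init_table(sgl, max_ents);

	/* One entry per piece of the skb; negative on overflow/error. */
	nents = skb_to_sgvec(skb, sgl, 0, skb->len);
	if (nents < 0)
		return nents;

	/* dma_map_sg() may merge entries and returns 0 on failure. */
	nents = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return nents ? nents : -ENOMEM;
}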
+ */ + ret = hns3_handle_desc_filling(ring, skb); + if (likely(ret > 0)) + return ret; + +fill_err: + hns3_clear_desc(ring, next_to_use_head); + return ret; +} + netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; - int pre_ntu, next_to_use_head; - struct sk_buff *frag_skb; - int bd_num = 0; - int ret; + int pre_ntu, ret; + bool doorbell; /* Hardware can only handle short frames above 32 bytes */ - if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); + + hns3_ring_stats_update(ring, sw_err_cnt); + return NETDEV_TX_OK; + } /* Prefetch the data used later */ prefetch(skb->data); @@ -1423,63 +2326,48 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); if (unlikely(ret <= 0)) { if (ret == -EBUSY) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + hns3_tx_doorbell(ring, 0, true); return NETDEV_TX_BUSY; - } else if (ret == -ENOMEM) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); } hns3_rl_err(netdev, "xmit error: %d!\n", ret); goto out_err_tx_ok; } - next_to_use_head = ring->next_to_use; - - ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); - if (unlikely(ret < 0)) - goto fill_err; - - bd_num += ret; - - skb_walk_frags(skb, frag_skb) { - ret = hns3_fill_skb_to_desc(ring, frag_skb, - DESC_TYPE_FRAGLIST_SKB); - if (unlikely(ret < 0)) - goto fill_err; - - bd_num += ret; - } + ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); + if (unlikely(ret <= 0)) + goto out_err_tx_ok; pre_ntu = ring->next_to_use ? 
(ring->next_to_use - 1) : (ring->desc_num - 1); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); + ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_FE_B)); trace_hns3_tx_desc(ring, pre_ntu); + skb_tx_timestamp(skb); + /* Complete translate all packets */ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); - netdev_tx_sent_queue(dev_queue, skb->len); - - wmb(); /* Commit all data before submit */ - - hnae3_queue_xmit(ring->tqp, bd_num); + doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, + netdev_xmit_more()); + hns3_tx_doorbell(ring, ret, doorbell); return NETDEV_TX_OK; -fill_err: - hns3_clear_desc(ring, next_to_use_head); - out_err_tx_ok: dev_kfree_skb_any(skb); + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); return NETDEV_TX_OK; } static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) { + char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; + char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = hns3_get_handle(netdev); struct sockaddr *mac_addr = p; int ret; @@ -1488,8 +2376,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { - netdev_info(netdev, "already using mac address %pM\n", - mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_info(netdev, "already using mac address %s\n", + format_mac_addr_sa); return 0; } @@ -1498,8 +2387,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) */ if (!hns3_is_phys_func(h->pdev) && !is_zero_ether_addr(netdev->perm_addr)) { - netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", - netdev->perm_addr, mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", + format_mac_addr_perm, format_mac_addr_sa); return -EPERM; } @@ -1509,7 +2400,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return ret; } - ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + eth_hw_addr_set(netdev, mac_addr->sa_data); return 0; } @@ -1528,6 +2419,35 @@ static int hns3_nic_do_ioctl(struct net_device *netdev, return h->ae_algo->ops->do_ioctl(h, ifr, cmd); } +static int hns3_nic_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->hwtstamp_get) + return -EOPNOTSUPP; + + return h->ae_algo->ops->hwtstamp_get(h, config); +} + +static int hns3_nic_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->hwtstamp_set) + return -EOPNOTSUPP; + + return h->ae_algo->ops->hwtstamp_set(h, config, extack); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1557,7 +2477,21 @@ static int hns3_nic_set_features(struct net_device *netdev, h->ae_algo->ops->enable_fd(h, enable); } - netdev->features = features; + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + h->ae_algo->ops->cls_flower_active(h)) { + netdev_err(netdev, + "there are 
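/* Illustrative sketch (assumed names): the doorbell batching shown in
 * hns3_nic_net_xmit() above. Bytes are accounted to BQL with
 * __netdev_tx_sent_queue(), whose return value, combined with
 * netdev_xmit_more(), tells the driver whether this packet ends the burst
 * and the tail register must be written now. ex_tx_kick() is a placeholder.
 */
#include <linux/io.h>
#include <linux/netdevice.h>

static void ex_tx_kick(struct netdev_queue *txq, void __iomem *tail_reg,
		       unsigned int bytes, u32 tail, int *pending)
{
	bool kick = __netdev_tx_sent_queue(txq, bytes, netdev_xmit_more());

	(*pending)++;
	if (!kick)
		return;		/* more packets are coming; defer the MMIO */

	writel(tail, tail_reg);
	*pending = 0;
}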
offloaded TC filters active, cannot disable HW TC offload\n"); + return -EINVAL; + } + + if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && + h->ae_algo->ops->enable_vlan_filter) { + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + ret = h->ae_algo->ops->enable_vlan_filter(h, enable); + if (ret) + return ret; + } + return 0; } @@ -1574,9 +2508,9 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb, return features; if (skb->encapsulation) - len = skb_inner_transport_header(skb) - skb->data; + len = skb_inner_transport_offset(skb); else - len = skb_transport_header(skb) - skb->data; + len = skb_transport_offset(skb); /* Assume L4 is 60 byte as TCP is the only protocol with a * a flexible value, and it's max len is 60 bytes. @@ -1592,80 +2526,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb, return features; } +static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, + struct hns3_enet_ring *ring, bool is_tx) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin(&ring->syncp); + if (is_tx) { + stats->tx_bytes += ring->stats.tx_bytes; + stats->tx_packets += ring->stats.tx_pkts; + stats->tx_dropped += ring->stats.sw_err_cnt; + stats->tx_dropped += ring->stats.tx_vlan_err; + stats->tx_dropped += ring->stats.tx_l4_proto_err; + stats->tx_dropped += ring->stats.tx_l2l3l4_err; + stats->tx_dropped += ring->stats.tx_tso_err; + stats->tx_dropped += ring->stats.over_max_recursion; + stats->tx_dropped += ring->stats.hw_limitation; + stats->tx_dropped += ring->stats.copy_bits_err; + stats->tx_dropped += ring->stats.skb2sgl_err; + stats->tx_dropped += ring->stats.map_sg_err; + stats->tx_errors += ring->stats.sw_err_cnt; + stats->tx_errors += ring->stats.tx_vlan_err; + stats->tx_errors += ring->stats.tx_l4_proto_err; + stats->tx_errors += ring->stats.tx_l2l3l4_err; + stats->tx_errors += ring->stats.tx_tso_err; + stats->tx_errors += ring->stats.over_max_recursion; + stats->tx_errors += ring->stats.hw_limitation; + stats->tx_errors += ring->stats.copy_bits_err; + stats->tx_errors += ring->stats.skb2sgl_err; + stats->tx_errors += ring->stats.map_sg_err; + } else { + stats->rx_bytes += ring->stats.rx_bytes; + stats->rx_packets += ring->stats.rx_pkts; + stats->rx_dropped += ring->stats.l2_err; + stats->rx_errors += ring->stats.l2_err; + stats->rx_errors += ring->stats.l3l4_csum_err; + stats->rx_crc_errors += ring->stats.l2_err; + stats->multicast += ring->stats.rx_multicast; + stats->rx_length_errors += ring->stats.err_pkt_len; + } + } while (u64_stats_fetch_retry(&ring->syncp, start)); +} + static void hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct hns3_nic_priv *priv = netdev_priv(netdev); int queue_num = priv->ae_handle->kinfo.num_tqps; struct hnae3_handle *handle = priv->ae_handle; + struct rtnl_link_stats64 ring_total_stats; struct hns3_enet_ring *ring; - u64 rx_length_errors = 0; - u64 rx_crc_errors = 0; - u64 rx_multicast = 0; - unsigned int start; - u64 tx_errors = 0; - u64 rx_errors = 0; - unsigned int idx; - u64 tx_bytes = 0; - u64 rx_bytes = 0; - u64 tx_pkts = 0; - u64 rx_pkts = 0; - u64 tx_drop = 0; - u64 rx_drop = 0; + int idx; if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return; - handle->ae_algo->ops->update_stats(handle, &netdev->stats); + handle->ae_algo->ops->update_stats(handle); + memset(&ring_total_stats, 0, sizeof(ring_total_stats)); for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ ring = &priv->ring[idx]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - 
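/* Illustrative sketch (assumed stats layout): the per-ring counter snapshot
 * used by hns3_fetch_stats() above. Readers loop on
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() so a concurrent writer on
 * a 32-bit machine can never hand back a torn 64-bit value. struct
 * ex_ring_stats is a placeholder.
 */
#include <linux/u64_stats_sync.h>

struct ex_ring_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void ex_fetch_ring_stats(const struct ex_ring_stats *s,
				u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}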
tx_bytes += ring->stats.tx_bytes; - tx_pkts += ring->stats.tx_pkts; - tx_drop += ring->stats.sw_err_cnt; - tx_drop += ring->stats.tx_vlan_err; - tx_drop += ring->stats.tx_l4_proto_err; - tx_drop += ring->stats.tx_l2l3l4_err; - tx_drop += ring->stats.tx_tso_err; - tx_errors += ring->stats.sw_err_cnt; - tx_errors += ring->stats.tx_vlan_err; - tx_errors += ring->stats.tx_l4_proto_err; - tx_errors += ring->stats.tx_l2l3l4_err; - tx_errors += ring->stats.tx_tso_err; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + hns3_fetch_stats(&ring_total_stats, ring, true); /* fetch the rx stats */ ring = &priv->ring[idx + queue_num]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - rx_bytes += ring->stats.rx_bytes; - rx_pkts += ring->stats.rx_pkts; - rx_drop += ring->stats.l2_err; - rx_errors += ring->stats.l2_err; - rx_errors += ring->stats.l3l4_csum_err; - rx_crc_errors += ring->stats.l2_err; - rx_multicast += ring->stats.rx_multicast; - rx_length_errors += ring->stats.err_pkt_len; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - } - - stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_pkts; - stats->rx_bytes = rx_bytes; - stats->rx_packets = rx_pkts; - - stats->rx_errors = rx_errors; - stats->multicast = rx_multicast; - stats->rx_length_errors = rx_length_errors; - stats->rx_crc_errors = rx_crc_errors; + hns3_fetch_stats(&ring_total_stats, ring, false); + } + + stats->tx_bytes = ring_total_stats.tx_bytes; + stats->tx_packets = ring_total_stats.tx_packets; + stats->rx_bytes = ring_total_stats.rx_bytes; + stats->rx_packets = ring_total_stats.rx_packets; + + stats->rx_errors = ring_total_stats.rx_errors; + stats->multicast = ring_total_stats.multicast; + stats->rx_length_errors = ring_total_stats.rx_length_errors; + stats->rx_crc_errors = ring_total_stats.rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - stats->tx_errors = tx_errors; - stats->rx_dropped = rx_drop; - stats->tx_dropped = tx_drop; + stats->tx_errors = ring_total_stats.tx_errors; + stats->rx_dropped = ring_total_stats.rx_dropped; + stats->tx_dropped = ring_total_stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1682,7 +2625,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev, static int hns3_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; struct hnae3_knic_private_info *kinfo; u8 tc = mqprio_qopt->qopt.num_tc; u16 mode = mqprio_qopt->mode; @@ -1705,16 +2647,70 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc ? 
tc : 1, prio_tc) : -EOPNOTSUPP; + kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; +} + +static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, + struct flow_cls_offload *flow) +{ + int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); + struct hnae3_handle *h = hns3_get_handle(priv->netdev); + + switch (flow->command) { + case FLOW_CLS_REPLACE: + if (h->ae_algo->ops->add_cls_flower) + return h->ae_algo->ops->add_cls_flower(h, flow, tc); + break; + case FLOW_CLS_DESTROY: + if (h->ae_algo->ops->del_cls_flower) + return h->ae_algo->ops->del_cls_flower(h, flow); + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct hns3_nic_priv *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return hns3_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } } +static LIST_HEAD(hns3_block_cb_list); + static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type != TC_SETUP_QDISC_MQPRIO) + struct hns3_nic_priv *priv = netdev_priv(dev); + int ret; + + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + ret = hns3_setup_tc(dev, type_data); + break; + case TC_SETUP_BLOCK: + ret = flow_block_cb_setup_simple(type_data, + &hns3_block_cb_list, + hns3_setup_tc_block_cb, + priv, priv, true); + break; + default: return -EOPNOTSUPP; + } - return hns3_setup_tc(dev, type_data); + return ret; } static int hns3_vlan_rx_add_vid(struct net_device *netdev, @@ -1800,24 +2796,14 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) netdev_err(netdev, "failed to change MTU in hardware %d\n", ret); else - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return ret; } -static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +static int hns3_get_timeout_queue(struct net_device *ndev) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = hns3_get_handle(ndev); - struct hns3_enet_ring *tx_ring; - struct napi_struct *napi; - int timeout_queue = 0; - int hw_head, hw_tail; - int fbd_num, fbd_oft; - int ebd_num, ebd_oft; - int bd_num, bd_err; - int ring_en, tc; - int i; + unsigned int i; /* Find the stopped queue the same way the stack does */ for (i = 0; i < ndev->num_tx_queues; i++) { @@ -1825,11 +2811,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) unsigned long trans_start; q = netdev_get_tx_queue(ndev, i); - trans_start = q->trans_start; + trans_start = READ_ONCE(q->trans_start); if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + ndev->watchdog_timeo))) { - timeout_queue = i; +#ifdef CONFIG_BQL + struct dql *dql = &q->dql; + + netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", + dql->last_obj_cnt, dql->num_queued, + dql->adj_limit, dql->num_completed); +#endif netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", q->state, jiffies_to_msecs(jiffies - trans_start)); @@ -1837,17 +2829,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) } } - if (i == ndev->num_tx_queues) { - netdev_info(ndev, - "no netdev TX timeout queue found, timeout count: %llu\n", - priv->tx_timeout_count); - return false; - } - - priv->tx_timeout_count++; + return i; +} - tx_ring = &priv->ring[timeout_queue]; - napi = &tx_ring->tqp_vector->napi; +static void hns3_dump_queue_stats(struct net_device *ndev, + 
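/* Illustrative sketch: the timeout-queue scan performed above walks every
 * TX queue and reports the first one that is stopped and whose last
 * transmit started more than watchdog_timeo jiffies ago, mirroring the
 * stack's own watchdog test. ex_find_stuck_txq() is a placeholder name.
 */
#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* Returns the stuck queue index, or num_tx_queues if none is stuck. */
static unsigned int ex_find_stuck_txq(struct net_device *ndev)
{
	unsigned int i;

	for (i = 0; i < ndev->num_tx_queues; i++) {
		struct netdev_queue *q = netdev_get_tx_queue(ndev, i);
		unsigned long trans_start = READ_ONCE(q->trans_start);

		if (netif_xmit_stopped(q) &&
		    time_after(jiffies, trans_start + ndev->watchdog_timeo))
			break;
	}

	return i;
}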
struct hns3_enet_ring *tx_ring, + int timeout_queue) +{ + struct napi_struct *napi = &tx_ring->tqp_vector->napi; + struct hns3_nic_priv *priv = netdev_priv(ndev); netdev_info(ndev, "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", @@ -1855,15 +2845,60 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) tx_ring->next_to_clean, napi->state); netdev_info(ndev, - "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", + "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, - tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); + tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); netdev_info(ndev, - "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", - tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, + "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); + netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n", + tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell); +} + +static void hns3_dump_queue_reg(struct net_device *ndev, + struct hns3_enet_ring *tx_ring) +{ + netdev_info(ndev, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG), + readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG), + hns3_tqp_read_reg(tx_ring, + HNS3_RING_TX_RING_EBD_OFFSET_REG)); +} + +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); + struct hns3_enet_ring *tx_ring; + u32 timeout_queue; + + timeout_queue = hns3_get_timeout_queue(ndev); + if (timeout_queue >= ndev->num_tx_queues) { + netdev_info(ndev, + "no netdev TX timeout queue found, timeout count: %llu\n", + priv->tx_timeout_count); + return false; + } + + priv->tx_timeout_count++; + + tx_ring = &priv->ring[timeout_queue]; + hns3_dump_queue_stats(ndev, tx_ring, timeout_queue); + /* When mac received many pause frames continuous, it's unable to send * packets, which may cause tx timeout */ @@ -1875,32 +2910,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); } - hw_head = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_HEAD_REG); - hw_tail = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_TAIL_REG); - fbd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_FBDNUM_REG); - fbd_oft = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_OFFSET_REG); - ebd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_EBDNUM_REG); - ebd_oft = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_EBD_OFFSET_REG); - bd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_BD_NUM_REG); - bd_err = 
readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_BD_ERR_REG); - ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); - tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); - - netdev_info(ndev, - "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", - bd_num, hw_head, hw_tail, bd_err, - readl(tx_ring->tqp_vector->mask_addr)); - netdev_info(ndev, - "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", - ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); + hns3_dump_queue_reg(ndev, tx_ring); return true; } @@ -1983,27 +2993,71 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { struct hnae3_handle *h = hns3_get_handle(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; if (!h->ae_algo->ops->set_vf_mac) return -EOPNOTSUPP; if (is_multicast_ether_addr(mac)) { + hnae3_format_mac_addr(format_mac_addr, mac); netdev_err(netdev, - "Invalid MAC:%pM specified. Could not set MAC\n", - mac); + "Invalid MAC:%s specified. Could not set MAC\n", + format_mac_addr); return -EINVAL; } return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); } +#define HNS3_INVALID_DSCP 0xff +#define HNS3_DSCP_SHIFT 2 + +static u8 hns3_get_skb_dscp(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + u8 dscp = HNS3_INVALID_DSCP; + + if (protocol == htons(ETH_P_8021Q)) + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT; + else if (protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT; + + return dscp; +} + +static u16 hns3_nic_select_queue(struct net_device *netdev, + struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u8 dscp; + + if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP || + !h->ae_algo->ops->get_dscp_prio) + goto out; + + dscp = hns3_get_skb_dscp(skb); + if (unlikely(dscp >= HNAE3_MAX_DSCP)) + goto out; + + skb->priority = h->kinfo.dscp_prio[dscp]; + if (skb->priority == HNAE3_PRIO_ID_INVALID) + skb->priority = 0; + +out: + return netdev_pick_tx(netdev, skb, sb_dev); +} + static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_open = hns3_nic_net_open, .ndo_stop = hns3_nic_net_stop, .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, - .ndo_do_ioctl = hns3_nic_do_ioctl, + .ndo_eth_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_features_check = hns3_features_check, @@ -2022,6 +3076,9 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, .ndo_set_vf_rate = hns3_nic_set_vf_rate, .ndo_set_vf_mac = hns3_nic_set_vf_mac, + .ndo_select_queue = hns3_nic_select_queue, + .ndo_hwtstamp_get = hns3_nic_hwtstamp_get, + .ndo_hwtstamp_set = hns3_nic_hwtstamp_set, }; bool hns3_is_phys_func(struct pci_dev *pdev) @@ -2036,9 +3093,10 @@ bool hns3_is_phys_func(struct pci_dev *pdev) case HNAE3_DEV_ID_50GE_RDMA: case HNAE3_DEV_ID_50GE_RDMA_MACSEC: case HNAE3_DEV_ID_100G_RDMA_MACSEC: + case HNAE3_DEV_ID_200G_RDMA: return true; - case HNAE3_DEV_ID_100G_VF: - case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: + case HNAE3_DEV_ID_VF: + case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: return false; default: dev_warn(&pdev->dev, "un-recognized pci device-id %u", @@ -2063,15 +3121,6 @@ static 
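/* Illustrative sketch (assumed sentinel): the first half of DSCP-based
 * queue selection shown above is extracting the DSCP from an outgoing
 * packet, unwrapping a VLAN tag first; the >> 2 drops the two ECN bits.
 * EX_INVALID_DSCP and ex_skb_dscp() are placeholders.
 */
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/dsfield.h>

#define EX_INVALID_DSCP	0xff	/* assumed "no DSCP found" sentinel */

static u8 ex_skb_dscp(const struct sk_buff *skb)
{
	__be16 proto = skb->protocol;

	if (proto == htons(ETH_P_8021Q))
		proto = vlan_get_protocol(skb);

	if (proto == htons(ETH_P_IP))
		return ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	if (proto == htons(ETH_P_IPV6))
		return ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return EX_INVALID_DSCP;
}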
void hns3_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); } -static void hns3_get_dev_capability(struct pci_dev *pdev, - struct hnae3_ae_dev *ae_dev) -{ - if (pdev->revision >= 0x21) { - hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); - hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); - } -} - /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -2093,18 +3142,30 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; - hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); ret = hnae3_register_ae_dev(ae_dev); - if (ret) { - devm_kfree(&pdev->dev, ae_dev); + if (ret) pci_set_drvdata(pdev, NULL); - } return ret; } +/** + * hns3_clean_vf_config + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs allocated + * + * Clean residual vf config after disable sriov + **/ +static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (ae_dev->ops->clean_vf_config) + ae_dev->ops->clean_vf_config(ae_dev, num_vfs); +} + /* hns3_remove - Device removal routine * @pdev: PCI device information struct */ @@ -2143,7 +3204,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) else return num_vfs; } else if (!pci_vfs_assigned(pdev)) { + int num_vfs_pre = pci_num_vf(pdev); + pci_disable_sriov(pdev); + hns3_clean_vf_config(pdev, num_vfs_pre); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -2157,20 +3221,45 @@ static void hns3_shutdown(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); hnae3_unregister_ae_dev(ae_dev); - devm_kfree(&pdev->dev, ae_dev); pci_set_drvdata(pdev, NULL); if (system_state == SYSTEM_POWER_OFF) pci_set_power_state(pdev, PCI_D3hot); } +static int __maybe_unused hns3_suspend(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to suspend.\n"); + if (ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); + } + + return 0; +} + +static int __maybe_unused hns3_resume(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to resume.\n"); + if (ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); + } + + return 0; +} + static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); pci_ers_result_t ret; - dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); + dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; @@ -2222,8 +3311,8 @@ static void hns3_reset_prepare(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); dev_info(&pdev->dev, "FLR prepare\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) - ae_dev->ops->flr_prepare(ae_dev); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); } static void hns3_reset_done(struct pci_dev *pdev) @@ -2231,8 +3320,8 @@ static void hns3_reset_done(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); dev_info(&pdev->dev, "FLR done\n"); - if 
(ae_dev && ae_dev->ops && ae_dev->ops->flr_done) - ae_dev->ops->flr_done(ae_dev); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); } static const struct pci_error_handlers hns3_err_handler = { @@ -2242,12 +3331,15 @@ static const struct pci_error_handlers hns3_err_handler = { .reset_done = hns3_reset_done, }; +static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, .shutdown = hns3_shutdown, + .driver.pm = &hns3_pm_ops, .sriov_configure = hns3_pci_sriov_configure, .err_handler = &hns3_err_handler, }; @@ -2257,51 +3349,55 @@ static void hns3_set_default_feature(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); struct pci_dev *pdev = h->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; + NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + if (hnae3_ae_dev_gro_supported(ae_dev)) + netdev->features |= NETIF_F_GRO_HW; - netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_FRAGLIST; + if (hnae3_ae_dev_fd_supported(ae_dev)) + netdev->features |= NETIF_F_NTUPLE; - netdev->vlan_features |= - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_FRAGLIST; + if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) + netdev->features |= NETIF_F_GSO_UDP_L4; - netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_FRAGLIST; + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - if (pdev->revision >= 0x21) { - netdev->hw_features |= NETIF_F_GRO_HW; - netdev->features |= NETIF_F_GRO_HW; + if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - if (!(h->flags & HNAE3_SUPPORT_VF)) { - netdev->hw_features |= NETIF_F_NTUPLE; - netdev->features |= NETIF_F_NTUPLE; - } + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) + netdev->features |= NETIF_F_HW_TC; + + netdev->hw_features |= netdev->features; + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + 
netdev->vlan_features |= netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | + NETIF_F_HW_TC); + + netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; + + /* The device_version V3 hardware can't offload the checksum for IP in + * GRE packets, but can do it for NvGRE. So default to disable the + * checksum and GSO offload for GRE. + */ + if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) { + netdev->features &= ~NETIF_F_GSO_GRE; + netdev->features &= ~NETIF_F_GSO_GRE_CSUM; } } @@ -2311,6 +3407,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, unsigned int order = hns3_page_order(ring); struct page *p; + if (ring->page_pool) { + p = page_pool_dev_alloc_frag(ring->page_pool, + &cb->page_offset, + hns3_buf_size(ring)); + if (unlikely(!p)) + return -ENOMEM; + + cb->priv = p; + cb->buf = page_address(p); + cb->dma = page_pool_get_dma_addr(p); + cb->type = DESC_TYPE_PP_FRAG; + cb->reuse_flag = 0; + return 0; + } + p = dev_alloc_pages(order); if (!p) return -ENOMEM; @@ -2321,17 +3432,25 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->buf = page_address(p); cb->length = hns3_page_size(ring); cb->type = DESC_TYPE_PAGE; + page_ref_add(p, USHRT_MAX - 1); + cb->pagecnt_bias = USHRT_MAX; return 0; } static void hns3_free_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - if (cb->type == DESC_TYPE_SKB) - dev_kfree_skb_any((struct sk_buff *)cb->priv); - else if (!HNAE3_IS_TX_RING(ring)) - put_page((struct page *)cb->priv); + struct hns3_desc_cb *cb, int budget) +{ + if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | + DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) + napi_consume_skb(cb->priv, budget); + else if (!HNAE3_IS_TX_RING(ring)) { + if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); + else if (cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, cb->priv, + false); + } memset(cb, 0, sizeof(*cb)); } @@ -2349,21 +3468,26 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) static void hns3_unmap_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) + if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); - else if (cb->length) + else if ((cb->type & DESC_TYPE_PAGE) && cb->length) dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); + else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | + DESC_TYPE_SGL_SKB)) + hns3_tx_spare_reclaim_cb(ring, cb); } static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) { hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc[i].addr = 0; + ring->desc_cb[i].refill = 0; } -static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, + int budget) { struct hns3_desc_cb *cb = &ring->desc_cb[i]; @@ -2371,7 +3495,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) return; hns3_buffer_detach(ring, i); - hns3_free_buffer(ring, cb); + hns3_free_buffer(ring, cb, budget); } static void hns3_free_buffers(struct hns3_enet_ring *ring) @@ -2379,7 +3503,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) int i; for (i = 0; i < ring->desc_num; i++) - hns3_free_buffer_detach(ring, i); + 
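/* Illustrative sketch (field set is kernel-version dependent): carving RX
 * buffers out of a page pool as done in hns3_alloc_buffer() above. The
 * pool is created with DMA mapping and device sync delegated to the pool,
 * and each buffer is a frag of a pooled page. All ex_* names and the
 * sizing values are placeholders.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/page_pool/helpers.h>

static struct page_pool *ex_create_rx_pool(struct device *dev, int ring_size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= dev_to_node(dev),
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

/* Hand out one RX buffer as a frag of a pooled page. */
static int ex_alloc_rx_buf(struct page_pool *pool, unsigned int buf_size,
			   dma_addr_t *dma, void **va)
{
	unsigned int offset;
	struct page *p = page_pool_dev_alloc_frag(pool, &offset, buf_size);

	if (!p)
		return -ENOMEM;

	*va = page_address(p) + offset;
	*dma = page_pool_get_dma_addr(p) + offset;
	return 0;
}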
hns3_free_buffer_detach(ring, i, 0); } /* free desc along with its attached buffer */ @@ -2408,13 +3532,13 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) return 0; } -static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, +static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { int ret; ret = hns3_alloc_buffer(ring, cb); - if (ret) + if (ret || ring->page_pool) goto out; ret = hns3_map_buffer(ring, cb); @@ -2424,19 +3548,21 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return 0; out_with_buf: - hns3_free_buffer(ring, cb); + hns3_free_buffer(ring, cb, 0); out: return ret; } -static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) { - int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); if (ret) return ret; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); + ring->desc_cb[i].refill = 1; return 0; } @@ -2447,16 +3573,19 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) int i, j, ret; for (i = 0; i < ring->desc_num; i++) { - ret = hns3_alloc_buffer_attach(ring, i); + ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; + + if (!(i % HNS3_RESCHED_BD_NUM)) + cond_resched(); } return 0; out_buffer_fail: for (j = i - 1; j >= 0; j--) - hns3_free_buffer_detach(ring, j); + hns3_free_buffer_detach(ring, j, 0); return ret; } @@ -2466,83 +3595,90 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, { hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc_cb[i].refill = 1; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; } static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) { ring->desc_cb[i].reuse_flag = 0; + ring->desc_cb[i].refill = 1; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; + + dma_sync_single_for_device(ring_to_dev(ring), + ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); } -static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, - int *bytes, int *pkts) +static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, + int *bytes, int *pkts, int budget) { + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_tx_doorbell(). 
+ */ + int ltu = smp_load_acquire(&ring->last_to_use); int ntc = ring->next_to_clean; struct hns3_desc_cb *desc_cb; + bool reclaimed = false; + struct hns3_desc *desc; + + while (ltu != ntc) { + desc = &ring->desc[ntc]; + + if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & + BIT(HNS3_TXD_VLD_B)) + break; - while (head != ntc) { desc_cb = &ring->desc_cb[ntc]; - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; + + if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | + DESC_TYPE_BOUNCE_HEAD | + DESC_TYPE_SGL_SKB)) { + (*pkts)++; + (*bytes) += desc_cb->send_bytes; + } + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ - hns3_free_buffer_detach(ring, ntc); + hns3_free_buffer_detach(ring, ntc, budget); if (++ntc == ring->desc_num) ntc = 0; /* Issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ntc]); + reclaimed = true; } + if (unlikely(!reclaimed)) + return false; + /* This smp_store_release() pairs with smp_load_acquire() in * ring_space called by hns3_nic_net_xmit. */ smp_store_release(&ring->next_to_clean, ntc); -} -static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) -{ - int u = ring->next_to_use; - int c = ring->next_to_clean; + hns3_tx_spare_update(ring); - if (unlikely(h > ring->desc_num)) - return 0; - - return u > c ? (h > c && h <= u) : (h > c || h <= u); + return true; } -void hns3_clean_tx_ring(struct hns3_enet_ring *ring) +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) { struct net_device *netdev = ring_to_netdev(ring); struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; - int head; - - head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - - if (is_ring_empty(ring) || head == ring->next_to_clean) - return; /* no data to poll */ - - rmb(); /* Make sure head is ready before touch any data */ - - if (unlikely(!is_valid_clean_head(ring, head))) { - hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head, - ring->next_to_use, ring->next_to_clean); - - u64_stats_update_begin(&ring->syncp); - ring->stats.io_err_cnt++; - u64_stats_update_end(&ring->syncp); - return; - } bytes = 0; pkts = 0; - hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); + + if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) + return; ring->tqp_vector->tx_group.total_bytes += bytes; ring->tqp_vector->tx_group.total_packets += pkts; @@ -2574,10 +3710,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring) int ntc = ring->next_to_clean; int ntu = ring->next_to_use; + if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) + return ring->desc_num; + return ((ntc >= ntu) ? 
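/* Illustrative sketch (assumed ring layout): the single-producer/
 * single-consumer contract used above between xmit and reclaim. The
 * producer publishes its index with smp_store_release() only after the
 * descriptors are fully written; the consumer reads it with
 * smp_load_acquire() before trusting any descriptor contents. struct
 * ex_ring is a placeholder.
 */
#include <asm/barrier.h>

struct ex_ring {
	int last_to_use;	/* written by the xmit (producer) path */
	int next_to_clean;	/* written by the reclaim (consumer) path */
	int desc_num;
};

/* Producer: publish new descriptors after they are fully initialised. */
static void ex_publish(struct ex_ring *r, int new_tail)
{
	smp_store_release(&r->last_to_use, new_tail);
}

/* Consumer: how many descriptors may be reclaimed right now. */
static int ex_reclaimable(struct ex_ring *r)
{
	int tail = smp_load_acquire(&r->last_to_use);

	return (tail - r->next_to_clean + r->desc_num) % r->desc_num;
}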
0 : ring->desc_num) + ntc - ntu; } -static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, +/* Return true if there is any allocation failure */ +static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) { struct hns3_desc_cb *desc_cb; @@ -2587,41 +3727,65 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, for (i = 0; i < cleand_count; i++) { desc_cb = &ring->desc_cb[ring->next_to_use]; if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, reuse_pg_cnt); hns3_reuse_buffer(ring, ring->next_to_use); } else { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); hns3_rl_err(ring_to_netdev(ring), "alloc rx buffer failed: %d\n", ret); - break; + + writel(i, ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG); + return true; } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_reuse_pg++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, non_reuse_pg); } ring_ptr_move_fw(ring, next_to_use); } - wmb(); /* Make all data has been write before submit */ - writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); + writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); + return false; +} + +static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) +{ + return page_count(cb->priv) == cb->pagecnt_bias; } -static bool hns3_page_is_reusable(struct page *page) +static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, + int pull_len, + struct hns3_desc_cb *desc_cb) { - return page_to_nid(page) == numa_mem_id() && - !page_is_pfmemalloc(page); + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = desc_cb->page_offset + pull_len; + int size = le16_to_cpu(desc->rx.size); + u32 frag_size = size - pull_len; + void *frag = napi_alloc_frag(frag_size); + + if (unlikely(!frag)) { + hns3_ring_stats_update(ring, frag_alloc_err); + + hns3_rl_err(ring_to_netdev(ring), + "failed to allocate rx frag\n"); + return -ENOMEM; + } + + desc_cb->reuse_flag = 1; + memcpy(frag, desc_cb->buf + frag_offset, frag_size); + skb_add_rx_frag(skb, i, virt_to_page(frag), + offset_in_page(frag), frag_size, frag_size); + + hns3_ring_stats_update(ring, frag_alloc); + return 0; } static void hns3_nic_reuse_page(struct sk_buff *skb, int i, @@ -2629,38 +3793,70 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_desc_cb *desc_cb) { struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = desc_cb->page_offset + pull_len; int size = le16_to_cpu(desc->rx.size); u32 truesize = hns3_buf_size(ring); + u32 frag_size = size - pull_len; + int ret = 0; + bool reused; - skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize); - - /* Avoid re-using remote pages, or the stack is still using the page - * when page_offset rollback to zero, flag default unreuse - */ - if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || - (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) + if (ring->page_pool) { + skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, + frag_size, truesize); return; + } - /* Move offset up to the next cache line */ - 
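/* Illustrative sketch: the RX copybreak path above. For a small fragment
 * the payload is copied into a freshly allocated page frag and attached to
 * the skb, so the original DMA page never leaves the ring and can be
 * reused immediately. ex_rx_copybreak() is a placeholder name.
 */
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int ex_rx_copybreak(struct sk_buff *skb, int frag_idx,
			   const void *buf, unsigned int len)
{
	void *frag = napi_alloc_frag(len);

	if (unlikely(!frag))
		return -ENOMEM;

	memcpy(frag, buf, len);
	skb_add_rx_frag(skb, frag_idx, virt_to_page(frag),
			offset_in_page(frag), len, len);
	return 0;
}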
desc_cb->page_offset += truesize; + /* Avoid re-using remote or pfmem page */ + if (unlikely(!dev_page_is_reusable(desc_cb->priv))) + goto out; - if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { - desc_cb->reuse_flag = 1; - /* Bump ref count on page before it is given */ - get_page(desc_cb->priv); - } else if (page_count(desc_cb->priv) == 1) { + reused = hns3_can_reuse_page(desc_cb); + + /* Rx page can be reused when: + * 1. Rx page is only owned by the driver when page_offset + * is zero, which means 0 @ truesize will be used by + * stack after skb_add_rx_frag() is called, and the rest + * of rx page can be reused by driver. + * Or + * 2. Rx page is only owned by the driver when page_offset + * is non-zero, which means page_offset @ truesize will + * be used by stack after skb_add_rx_frag() is called, + * and 0 @ truesize can be reused by driver. + */ + if ((!desc_cb->page_offset && reused) || + ((desc_cb->page_offset + truesize + truesize) <= + hns3_page_size(ring) && desc_cb->page_offset)) { + desc_cb->page_offset += truesize; desc_cb->reuse_flag = 1; + } else if (desc_cb->page_offset && reused) { desc_cb->page_offset = 0; - get_page(desc_cb->priv); + desc_cb->reuse_flag = 1; + } else if (frag_size <= ring->rx_copybreak) { + ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); + if (!ret) + return; } + +out: + desc_cb->pagecnt_bias--; + + if (unlikely(!desc_cb->pagecnt_bias)) { + page_ref_add(desc_cb->priv, USHRT_MAX); + desc_cb->pagecnt_bias = USHRT_MAX; + } + + skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, + frag_size, truesize); + + if (unlikely(!desc_cb->reuse_flag)) + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); } static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) { __be16 type = skb->protocol; struct tcphdr *th; - int depth = 0; + u32 depth = 0; while (eth_type_vlan(type)) { struct vlan_hdr *vh; @@ -2714,30 +3910,27 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) return 0; } -static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - u32 l234info, u32 bd_base_info, u32 ol_info) +static void hns3_checksum_complete(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 ptype, u16 csum) { - struct net_device *netdev = ring_to_netdev(ring); - int l3_type, l4_type; - int ol4_type; - - skb->ip_summed = CHECKSUM_NONE; - - skb_checksum_none_assert(skb); - - if (!(netdev->features & NETIF_F_RXCSUM)) + if (ptype == HNS3_INVALID_PTYPE || + hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) return; - /* check if hardware has done checksum */ - if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) - return; + hns3_ring_stats_update(ring, csum_complete); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold((__force __sum16)csum); +} - if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | - BIT(HNS3_RXD_OL3E_B) | - BIT(HNS3_RXD_OL4E_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); +static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, + u32 ol_info, u32 ptype) +{ + int l3_type, l4_type; + int ol4_type; + + if (ptype != HNS3_INVALID_PTYPE) { + skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; + skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; return; } @@ -2748,13 +3941,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; - /* fall through */ + fallthrough; case 
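/* Illustrative sketch (assumed buffer struct): the pagecnt_bias trick used
 * by the RX page reuse logic above. One large page reference is taken up
 * front, per-buffer accounting then only decrements a local bias, and
 * page_count() is compared against the bias to decide whether the stack
 * still holds any reference. All ex_* names are placeholders.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>

struct ex_rx_buf {
	struct page *page;
	unsigned short pagecnt_bias;
};

/* Take one big refcount so later accounting stays driver-local. */
static void ex_rx_buf_init(struct ex_rx_buf *b, struct page *page)
{
	b->page = page;
	page_ref_add(page, USHRT_MAX - 1);
	b->pagecnt_bias = USHRT_MAX;
}

/* The page is reusable only while the driver holds every reference. */
static bool ex_rx_buf_reusable(const struct ex_rx_buf *b)
{
	return page_count(b->page) == b->pagecnt_bias;
}

/* Drop the driver's remaining references in one go when giving up. */
static void ex_rx_buf_drain(struct ex_rx_buf *b)
{
	__page_frag_cache_drain(b->page, b->pagecnt_bias);
	b->page = NULL;
}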
HNS3_OL4_TYPE_NO_TUN: l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S); - /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) && @@ -2768,6 +3960,43 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, } } +static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, + u32 l234info, u32 bd_base_info, u32 ol_info, + u16 csum) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + u32 ptype = HNS3_INVALID_PTYPE; + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) + ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + hns3_checksum_complete(ring, skb, ptype, csum); + + /* check if hardware has done checksum */ + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) + return; + + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | + BIT(HNS3_RXD_OL3E_B) | + BIT(HNS3_RXD_OL4E_B)))) { + skb->ip_summed = CHECKSUM_NONE; + hns3_ring_stats_update(ring, l3l4_csum_err); + + return; + } + + hns3_rx_handle_csum(skb, l234info, ol_info, ptype); +} + static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) { if (skb_has_frag_list(skb)) @@ -2782,8 +4011,9 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, { struct hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); - if (pdev->revision == 0x20) { + if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); if (!(*vlan_tag & VLAN_VID_MASK)) *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); @@ -2828,6 +4058,17 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, } } +static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) +{ + ring->desc[ring->next_to_clean].rx.bd_base_info &= + cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); + ring->desc_cb[ring->next_to_clean].refill = 0; + ring->next_to_clean += 1; + + if (unlikely(ring->next_to_clean == ring->desc_num)) + ring->next_to_clean = 0; +} + static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, unsigned char *va) { @@ -2839,10 +4080,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, skb = ring->skb; if (unlikely(!skb)) { hns3_rl_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -2856,24 +4094,30 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); - /* We can reuse buffer as-is, just make sure it is local */ - if (likely(hns3_page_is_reusable(desc_cb->priv))) + /* We can reuse buffer as-is, just make sure it is reusable */ + if (dev_page_is_reusable(desc_cb->priv)) desc_cb->reuse_flag = 1; + else if (desc_cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, desc_cb->priv, + false); else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); + __page_frag_cache_drain(desc_cb->priv, + desc_cb->pagecnt_bias); - ring_ptr_move_fw(ring, 
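/* Illustrative sketch: the two ways the RX checksum result above can be
 * handed to the stack. With a raw 16-bit hardware sum the driver reports
 * CHECKSUM_COMPLETE and folds the sum into skb->csum; with only a
 * pass/fail indication it reports CHECKSUM_UNNECESSARY and sets csum_level
 * for verified inner (tunnel) checksums. The ex_* helpers are placeholders.
 */
#include <linux/skbuff.h>
#include <net/checksum.h>

static void ex_rx_csum_complete(struct sk_buff *skb, u16 hw_csum)
{
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = csum_unfold((__force __sum16)hw_csum);
}

static void ex_rx_csum_unnecessary(struct sk_buff *skb, bool tunneled)
{
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* csum_level counts additional verified encapsulated checksums. */
	skb->csum_level = tunneled ? 1 : 0;
}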
next_to_clean); + hns3_rx_ring_move_fw(ring); return 0; } - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); + + if (ring->page_pool) + skb_mark_for_recycle(skb); + + hns3_ring_stats_update(ring, seg_pkt_cnt); ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); + hns3_rx_ring_move_fw(ring); return 0; } @@ -2903,6 +4147,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) "alloc rx fraglist skb fail\n"); return -ENXIO; } + + if (ring->page_pool) + skb_mark_for_recycle(new_skb); + ring->frag_num = 0; if (ring->tail_skb) { @@ -2921,9 +4169,14 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) skb = ring->tail_skb; } + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); trace_hns3_rx_desc(ring); - ring_ptr_move_fw(ring, next_to_clean); + hns3_rx_ring_move_fw(ring); ring->pending_buf++; } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); @@ -2932,8 +4185,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 l234info, - u32 bd_base_info, u32 ol_info) + u32 bd_base_info, u32 ol_info, u16 csum) { + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); u32 l3_type; skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, @@ -2941,7 +4196,8 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, HNS3_RXD_GRO_SIZE_S); /* if there is no HW GRO, do not set gro params */ if (!skb_shinfo(skb)->gso_size) { - hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); + hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info, + csum); return 0; } @@ -2949,7 +4205,16 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, HNS3_RXD_GRO_COUNT_M, HNS3_RXD_GRO_COUNT_S); - l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { + u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + l3_type = hns3_rx_ptype_tbl[ptype].l3_type; + } else { + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + } + if (l3_type == HNS3_L3_TYPE_IPV4) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; else if (l3_type == HNS3_L3_TYPE_IPV6) @@ -2961,19 +4226,72 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, } static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, - struct sk_buff *skb, u32 rss_hash) + struct sk_buff *skb, u32 rss_hash, + u32 l234info, u32 ol_info) { - struct hnae3_handle *handle = ring->tqp->handle; - enum pkt_hash_types rss_type; + enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE; + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); - if (rss_hash) - rss_type = handle->kinfo.rss_type; - else - rss_type = PKT_HASH_TYPE_NONE; + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { + u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + rss_type = hns3_rx_ptype_tbl[ptype].hash_type; + } else { + int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + 
HNS3_RXD_L4ID_S); + + if (l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) { + if (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP) + rss_type = PKT_HASH_TYPE_L4; + else if (l4_type == HNS3_L4_TYPE_IGMP || + l4_type == HNS3_L4_TYPE_ICMP) + rss_type = PKT_HASH_TYPE_L3; + } + } skb_set_hash(skb, rss_hash, rss_type); } +static void hns3_handle_rx_ts_info(struct net_device *netdev, + struct hns3_desc *desc, struct sk_buff *skb, + u32 bd_base_info) +{ + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 nsec = le32_to_cpu(desc->ts_nsec); + u32 sec = le32_to_cpu(desc->ts_sec); + + if (h->ae_algo->ops->get_rx_hwts) + h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); + } +} + +static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, struct sk_buff *skb, + u32 l234info) +{ + struct net_device *netdev = ring_to_netdev(ring); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } +} + static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { struct net_device *netdev = ring_to_netdev(ring); @@ -2982,6 +4300,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) struct hns3_desc *desc; unsigned int len; int pre_ntc, ret; + u16 csum; /* bdinfo handled below is only valid on the last BD of the * current packet, and ring->next_to_clean indicates the first @@ -2993,18 +4312,11 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) bd_base_info = le32_to_cpu(desc->rx.bd_base_info); l234info = le32_to_cpu(desc->rx.l234_info); ol_info = le32_to_cpu(desc->rx.ol_info); + csum = le16_to_cpu(desc->csum); - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. 
- */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; + hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info); - if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_tag); - } + hns3_handle_rx_vlan_tag(ring, desc, skb, l234info); if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | BIT(HNS3_RXD_L2E_B))))) { @@ -3025,11 +4337,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) /* This is needed in order to enable forwarding support */ ret = hns3_set_gro_and_checksum(ring, skb, l234info, - bd_base_info, ol_info); + bd_base_info, ol_info, csum); if (unlikely(ret)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, rx_err_cnt); return ret; } @@ -3047,7 +4357,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ring->tqp_vector->rx_group.total_bytes += len; - hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), + l234info, ol_info); return 0; } @@ -3065,29 +4376,31 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) prefetch(desc); - length = le16_to_cpu(desc->rx.size); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + if (!skb) { + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + /* Check valid BD */ + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) + return -ENXIO; - /* Check valid BD */ - if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) - return -ENXIO; + dma_rmb(); + length = le16_to_cpu(desc->rx.size); - if (!skb) - ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + ring->va = desc_cb->buf + desc_cb->page_offset; - /* Prefetch first cache line of first page - * Idea is to cache few bytes of the header of the packet. Our L1 Cache - * line size is 64B so need to prefetch twice to make it 128B. But in - * actual we can have greater size of caches with 128B Level 1 cache - * lines. In such a case, single fetch would suffice to cache in the - * relevant part of the header. - */ - prefetch(ring->va); -#if L1_CACHE_BYTES < 128 - prefetch(ring->va + L1_CACHE_BYTES); -#endif + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + + /* Prefetch first cache line of first page. + * Idea is to cache few bytes of the header of the packet. + * Our L1 Cache line size is 64B so need to prefetch twice to make + * it 128B. But in actual we can have greater size of caches with + * 128B Level 1 cache lines. In such a case, single fetch would + * suffice to cache in the relevant part of the header. 
+ */ + net_prefetch(ring->va); - if (!skb) { ret = hns3_alloc_skb(ring, length, ring->va); skb = ring->skb; @@ -3126,25 +4439,18 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int unused_count = hns3_desc_unused(ring); + bool failure = false; int recv_pkts = 0; - int recv_bds = 0; - int err, num; + int err; - num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); - num -= unused_count; unused_count -= ring->pending_buf; - if (num <= 0) - goto out; - - rmb(); /* Make sure num taken effect before the other data is touched */ - - while (recv_pkts < budget && recv_bds < num) { + while (recv_pkts < budget) { /* Reuse or realloc buffers */ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns3_nic_alloc_rx_buffers(ring, unused_count); - unused_count = hns3_desc_unused(ring) - - ring->pending_buf; + failure = failure || + hns3_nic_alloc_rx_buffers(ring, unused_count); + unused_count = 0; } /* Poll one pkt */ @@ -3157,153 +4463,46 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, recv_pkts++; } - recv_bds += ring->pending_buf; unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0; } out: - /* Make all data has been write before submit */ - if (unused_count > 0) - hns3_nic_alloc_rx_buffers(ring, unused_count); - - return recv_pkts; -} - -static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) -{ -#define HNS3_RX_LOW_BYTE_RATE 10000 -#define HNS3_RX_MID_BYTE_RATE 20000 -#define HNS3_RX_ULTRA_PACKET_RATE 40 - - enum hns3_flow_level_range new_flow_level; - struct hns3_enet_tqp_vector *tqp_vector; - int packets_per_msecs, bytes_per_msecs; - u32 time_passed_ms; - - tqp_vector = ring_group->ring->tqp_vector; - time_passed_ms = - jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); - if (!time_passed_ms) - return false; - - do_div(ring_group->total_packets, time_passed_ms); - packets_per_msecs = ring_group->total_packets; - - do_div(ring_group->total_bytes, time_passed_ms); - bytes_per_msecs = ring_group->total_bytes; - - new_flow_level = ring_group->coal.flow_level; - - /* Simple throttlerate management - * 0-10MB/s lower (50000 ints/s) - * 10-20MB/s middle (20000 ints/s) - * 20-1249MB/s high (18000 ints/s) - * > 40000pps ultra (8000 ints/s) + /* sync head pointer before exiting, since hardware will calculate + * FBD number with head pointer */ - switch (new_flow_level) { - case HNS3_FLOW_LOW: - if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) - new_flow_level = HNS3_FLOW_MID; - break; - case HNS3_FLOW_MID: - if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) - new_flow_level = HNS3_FLOW_HIGH; - else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) - new_flow_level = HNS3_FLOW_LOW; - break; - case HNS3_FLOW_HIGH: - case HNS3_FLOW_ULTRA: - default: - if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) - new_flow_level = HNS3_FLOW_MID; - break; - } - - if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && - &tqp_vector->rx_group == ring_group) - new_flow_level = HNS3_FLOW_ULTRA; - - ring_group->total_bytes = 0; - ring_group->total_packets = 0; - ring_group->coal.flow_level = new_flow_level; + if (unused_count > 0) + failure = failure || + hns3_nic_alloc_rx_buffers(ring, unused_count); - return true; + return failure ? 
budget : recv_pkts; } -static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) +static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) { - struct hns3_enet_tqp_vector *tqp_vector; - u16 new_int_gl; - - if (!ring_group->ring) - return false; - - tqp_vector = ring_group->ring->tqp_vector; - if (!tqp_vector->last_jiffies) - return false; - - if (ring_group->total_packets == 0) { - ring_group->coal.int_gl = HNS3_INT_GL_50K; - ring_group->coal.flow_level = HNS3_FLOW_LOW; - return true; - } - - if (!hns3_get_new_flow_lvl(ring_group)) - return false; + struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; + struct dim_sample sample = {}; - new_int_gl = ring_group->coal.int_gl; - switch (ring_group->coal.flow_level) { - case HNS3_FLOW_LOW: - new_int_gl = HNS3_INT_GL_50K; - break; - case HNS3_FLOW_MID: - new_int_gl = HNS3_INT_GL_20K; - break; - case HNS3_FLOW_HIGH: - new_int_gl = HNS3_INT_GL_18K; - break; - case HNS3_FLOW_ULTRA: - new_int_gl = HNS3_INT_GL_8K; - break; - default: - break; - } + if (!rx_group->coal.adapt_enable) + return; - if (new_int_gl != ring_group->coal.int_gl) { - ring_group->coal.int_gl = new_int_gl; - return true; - } - return false; + dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, + rx_group->total_bytes, &sample); + net_dim(&rx_group->dim, &sample); } -static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) { - struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; - bool rx_update, tx_update; + struct dim_sample sample = {}; - /* update param every 1000ms */ - if (time_before(jiffies, - tqp_vector->last_jiffies + msecs_to_jiffies(1000))) + if (!tx_group->coal.adapt_enable) return; - if (rx_group->coal.gl_adapt_enable) { - rx_update = hns3_get_new_int_gl(rx_group); - if (rx_update) - hns3_set_vector_coalesce_rx_gl(tqp_vector, - rx_group->coal.int_gl); - } - - if (tx_group->coal.gl_adapt_enable) { - tx_update = hns3_get_new_int_gl(tx_group); - if (tx_update) - hns3_set_vector_coalesce_tx_gl(tqp_vector, - tx_group->coal.int_gl); - } - - tqp_vector->last_jiffies = jiffies; + dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, + tx_group->total_bytes, &sample); + net_dim(&tx_group->dim, &sample); } static int hns3_nic_common_poll(struct napi_struct *napi, int budget) @@ -3326,7 +4525,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ hns3_for_each_ring(ring, tqp_vector->tx_group) - hns3_clean_tx_ring(ring); + hns3_clean_tx_ring(ring, budget); /* make sure rx ring budget not smaller than 1 */ if (tqp_vector->num_tqps > 1) @@ -3335,7 +4534,6 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) hns3_for_each_ring(ring, tqp_vector->rx_group) { int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, hns3_rx_skb); - if (rx_cleaned >= rx_budget) clean_complete = false; @@ -3349,94 +4547,79 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) if (napi_complete(napi) && likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { - hns3_update_new_int_gl(tqp_vector); + hns3_update_rx_int_coalesce(tqp_vector); + hns3_update_tx_int_coalesce(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); } return rx_pkt_total; } -static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) +static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node **head, + bool is_tx) { + u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; + u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; + struct hnae3_ring_chain_node *cur_chain = *head; struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *cur_chain = head; struct hnae3_ring_chain_node *chain; - struct hns3_enet_ring *tx_ring; - struct hns3_enet_ring *rx_ring; - - tx_ring = tqp_vector->tx_group.ring; - if (tx_ring) { - cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); - - cur_chain->next = NULL; - - while (tx_ring->next) { - tx_ring = tx_ring->next; - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), - GFP_KERNEL); - if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); - - cur_chain = chain; - } - } + struct hns3_enet_ring *ring; - rx_ring = tqp_vector->rx_group.ring; - if (!tx_ring && rx_ring) { - cur_chain->next = NULL; - cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + ring = is_tx ? 
tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; - rx_ring = rx_ring->next; + if (cur_chain) { + while (cur_chain->next) + cur_chain = cur_chain->next; } - while (rx_ring) { + while (ring) { chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = rx_ring->tqp->tqp_index; + return -ENOMEM; + if (cur_chain) + cur_chain->next = chain; + else + *head = chain; + chain->tqp_index = ring->tqp->tqp_index; hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + bit_value); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, field_value); cur_chain = chain; - rx_ring = rx_ring->next; + ring = ring->next; } return 0; +} + +static struct hnae3_ring_chain_node * +hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = NULL; + struct hnae3_ring_chain_node *chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) + goto err_free_chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) + goto err_free_chain; + + return cur_chain; err_free_chain: - cur_chain = head->next; while (cur_chain) { chain = cur_chain->next; devm_kfree(&pdev->dev, cur_chain); cur_chain = chain; } - head->next = NULL; - return -ENOMEM; + return NULL; } static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, @@ -3445,7 +4628,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, struct pci_dev *pdev = tqp_vector->handle->pdev; struct hnae3_ring_chain_node *chain_tmp, *chain; - chain = head->next; + chain = head; while (chain) { chain_tmp = chain->next; @@ -3480,20 +4663,66 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) } } +static void hns3_rx_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct hns3_enet_ring_group *group = container_of(dim, + struct hns3_enet_ring_group, dim); + struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; + struct dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); + tqp_vector->rx_group.coal.int_gl = cur_moder.usec; + + if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { + hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts); + tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; + } + + dim->state = DIM_START_MEASURE; +} + +static void hns3_tx_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct hns3_enet_ring_group *group = container_of(dim, + struct hns3_enet_ring_group, dim); + struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; + struct dim_cq_moder cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec); + tqp_vector->tx_group.coal.int_gl = cur_moder.usec; + + if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { + hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts); + tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; + } + + dim->state = DIM_START_MEASURE; +} + +static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) +{ + INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); + 
INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); +} + static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - int ret = 0; + int ret; int i; hns3_nic_set_cpumask(priv); for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; - hns3_vector_gl_rl_init_hw(tqp_vector, priv); + hns3_vector_coalesce_init_hw(tqp_vector, priv); tqp_vector->num_tqps = 0; + hns3_nic_init_dim(tqp_vector); } for (i = 0; i < h->kinfo.num_tqps; i++) { @@ -3514,6 +4743,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) } for (i = 0; i < priv->vector_num; i++) { + struct hnae3_ring_chain_node *vector_ring_chain; + tqp_vector = &priv->tqp_vector[i]; tqp_vector->rx_group.total_bytes = 0; @@ -3522,21 +4753,22 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector->tx_group.total_packets = 0; tqp_vector->handle = h; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) { + ret = -ENOMEM; goto map_ring_fail; + } ret = h->ae_algo->ops->map_ring_to_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); if (ret) goto map_ring_fail; netif_napi_add(priv->netdev, &tqp_vector->napi, - hns3_nic_common_poll, NAPI_POLL_WEIGHT); + hns3_nic_common_poll); } return 0; @@ -3548,10 +4780,36 @@ map_ring_fail: return ret; } -static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) +static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv) { -#define HNS3_VECTOR_PF_MAX_NUM 64 + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; + struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; + + /* initialize the configuration for interrupt coalescing. + * 1. GL (Interrupt Gap Limiter) + * 2. RL (Interrupt Rate Limiter) + * 3. 
QL (Interrupt Quantity Limiter) + * + * Default: enable interrupt coalescing self-adaptive and GL + */ + tx_coal->adapt_enable = 1; + rx_coal->adapt_enable = 1; + + tx_coal->int_gl = HNS3_INT_GL_50K; + rx_coal->int_gl = HNS3_INT_GL_50K; + + rx_coal->flow_level = HNS3_FLOW_LOW; + tx_coal->flow_level = HNS3_FLOW_LOW; + + if (ae_dev->dev_specs.int_ql_max) { + tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; + rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; + } +} +static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) +{ struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_vector_info *vector; @@ -3564,7 +4822,6 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) /* RSS size, cpu online and vector_num should be the same */ /* Should consider 2p/4p later */ vector_num = min_t(u16, num_online_cpus(), tqp_num); - vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), GFP_KERNEL); @@ -3588,7 +4845,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) tqp_vector->idx = i; tqp_vector->mask_addr = vector[i].io_addr; tqp_vector->vector_irq = vector[i].vector; - hns3_vector_gl_rl_init(tqp_vector, priv); + hns3_vector_coalesce_init(tqp_vector, priv); } out: @@ -3604,7 +4861,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int i; @@ -3619,13 +4876,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) * chain between vector and ring, we should go on to deal with * the remaining options. 
*/ - if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) dev_warn(priv->dev, "failed to get ring chain\n"); h->ae_algo->ops->unmap_ring_from_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); @@ -3651,6 +4909,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) devm_kfree(&pdev->dev, priv->tqp_vector); } +static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv) +{ +#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024) +#define HNS3_MAX_PACKET_SIZE (64 * 1024) + + struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev); + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hnae3_handle *handle = priv->ae_handle; + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) + return; + + if (!(domain && iommu_is_dma_domain(domain))) + return; + + priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE; + priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE; + + if (priv->tx_copybreak < priv->min_tx_copybreak) + priv->tx_copybreak = priv->min_tx_copybreak; + if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size) + handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size; +} + static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, unsigned int ring_type) { @@ -3662,12 +4944,13 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring = &priv->ring[q->tqp_index]; desc_num = priv->ae_handle->kinfo.num_tx_desc; ring->queue_index = q->tqp_index; - ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; + ring->tx_copybreak = priv->tx_copybreak; + ring->last_to_use = 0; } else { ring = &priv->ring[q->tqp_index + queue_num]; desc_num = priv->ae_handle->kinfo.num_rx_desc; ring->queue_index = q->tqp_index; - ring->io_base = q->io_base; + ring->rx_copybreak = priv->rx_copybreak; } hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); @@ -3718,6 +5001,28 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) priv->ring = NULL; } +static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) +{ + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = hns3_page_order(ring), + .pool_size = ring->desc_num * hns3_buf_size(ring) / + (PAGE_SIZE << hns3_page_order(ring)), + .nid = dev_to_node(ring_to_dev(ring)), + .dev = ring_to_dev(ring), + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE << hns3_page_order(ring), + }; + + ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(ring->page_pool)) { + dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n", + PTR_ERR(ring->page_pool)); + ring->page_pool = NULL; + } +} + static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) { int ret; @@ -3737,9 +5042,14 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) goto out_with_desc_cb; if (!HNAE3_IS_TX_RING(ring)) { + if (page_pool_enabled) + hns3_alloc_page_pool(ring); + ret = hns3_alloc_ring_buffers(ring); if (ret) goto out_with_desc; + } else { + hns3_init_tx_spare_buffer(ring); } return 0; @@ -3760,10 +5070,25 @@ void hns3_fini_ring(struct hns3_enet_ring *ring) ring->desc_cb = NULL; ring->next_to_clean = 0; 
ring->next_to_use = 0; + ring->last_to_use = 0; ring->pending_buf = 0; - if (ring->skb) { + if (!HNAE3_IS_TX_RING(ring) && ring->skb) { dev_kfree_skb_any(ring->skb); ring->skb = NULL; + } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { + struct hns3_tx_spare *tx_spare = ring->tx_spare; + + dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, + DMA_TO_DEVICE); + free_pages((unsigned long)tx_spare->buf, + get_order(tx_spare->len)); + devm_kfree(ring_to_dev(ring), tx_spare); + ring->tx_spare = NULL; + } + + if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { + page_pool_destroy(ring->page_pool); + ring->page_pool = NULL; } } @@ -3805,7 +5130,6 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) hns3_buf_size2type(ring->buf_size)); hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, ring->desc_num / 8 - 1); - } else { hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, (u32)dma); @@ -3820,21 +5144,17 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; int i; - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; + for (i = 0; i < tc_info->num_tc; i++) { int j; - if (!tc_info->enable) - continue; - - for (j = 0; j < tc_info->tqp_count; j++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { struct hnae3_queue *q; - q = priv->ring[tc_info->tqp_offset + j].tqp; - hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, - tc_info->tc); + q = priv->ring[tc_info->tqp_offset[i] + j].tqp; + hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); } } } @@ -3846,6 +5166,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int i, j; int ret; + hns3_update_tx_spare_buf_config(priv); for (i = 0; i < ring_num; i++) { ret = hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { @@ -3855,6 +5176,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) } u64_stats_init(&priv->ring[i].syncp); + cond_resched(); } return 0; @@ -3866,7 +5188,7 @@ out_when_alloc_ring_memory: return -ENOMEM; } -int hns3_uninit_all_ring(struct hns3_nic_priv *priv) +static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; int i; @@ -3875,15 +5197,15 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) hns3_fini_ring(&priv->ring[i]); hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); } - return 0; } /* Set mac addr if it is configured. 
or leave it to the AE driver */ static int hns3_init_mac_addr(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; - u8 mac_addr_temp[ETH_ALEN]; + u8 mac_addr_temp[ETH_ALEN] = {0}; int ret = 0; if (h->ae_algo->ops->get_mac_addr) @@ -3892,10 +5214,11 @@ static int hns3_init_mac_addr(struct net_device *netdev) /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(mac_addr_temp)) { eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); + dev_warn(priv->dev, "using random MAC address %s\n", + format_mac_addr); } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { - ether_addr_copy(netdev->dev_addr, mac_addr_temp); + eth_hw_addr_set(netdev, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); } else { return 0; @@ -3926,14 +5249,6 @@ static void hns3_uninit_phy(struct net_device *netdev) h->ae_algo->ops->mac_disconnect_phy(h); } -static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->del_all_fd_entries) - h->ae_algo->ops->del_all_fd_entries(h, clear_list); -} - static int hns3_client_start(struct hnae3_handle *handle) { if (!handle->ae_algo->ops->client_start) @@ -3953,26 +5268,99 @@ static void hns3_client_stop(struct hnae3_handle *handle) static void hns3_info_show(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; - dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); + dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); } +static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, + enum dim_cq_period_mode mode, bool is_tx) +{ + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hnae3_handle *handle = priv->ae_handle; + int i; + + if (is_tx) { + priv->tx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].tx_group.dim.mode = mode; + } else { + priv->rx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].rx_group.dim.mode = mode; + } + + if (hnae3_ae_dev_cq_supported(ae_dev)) { + u32 new_mode; + u64 reg; + + new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? + HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; + reg = is_tx ? 
HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; + + writel(new_mode, handle->kinfo.io_base + reg); + } +} + +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode) +{ + hns3_set_cq_period_mode(priv, tx_mode, true); + hns3_set_cq_period_mode(priv, rx_mode, false); +} + +static void hns3_state_init(struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle); + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); + + if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) + set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); +} + +static void hns3_state_uninit(struct hnae3_handle *handle) +{ + struct hns3_nic_priv *priv = handle->priv; + + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); u16 alloc_tqps, max_rss_size; struct hns3_nic_priv *priv; struct net_device *netdev; int ret; + ae_dev->handle = handle; + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, &max_rss_size); netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); @@ -3984,6 +5372,9 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->netdev = netdev; priv->ae_handle = handle; priv->tx_timeout_count = 0; + priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; + priv->min_tx_copybreak = 0; + priv->min_tx_spare_buf_size = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); @@ -4010,6 +5401,8 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_get_ring_cfg; } + hns3_nic_init_coal_cfg(priv); + ret = hns3_nic_alloc_vector_data(priv); if (ret) { ret = -ENOMEM; @@ -4028,16 +5421,13 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_init_ring; } + hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, + DIM_CQ_PERIOD_MODE_START_FROM_EQE); + ret = hns3_init_phy(netdev); if (ret) goto out_init_phy; - ret = register_netdev(netdev); - if (ret) { - dev_err(priv->dev, "probe register netdev fail!\n"); - goto out_reg_netdev_fail; - } - /* the device can work without cpu rmap, only aRFS needs it */ ret = hns3_set_rx_cpu_rmap(netdev); if (ret) @@ -4058,24 +5448,36 @@ static int hns3_client_init(struct hnae3_handle *handle) hns3_dcbnl_setup(handle); - hns3_dbg_init(handle); + ret = hns3_dbg_init(handle); + if (ret) { + dev_err(priv->dev, "failed to init debugfs, ret = %d\n", + ret); + goto out_client_start; + } - /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ - netdev->max_mtu = HNS3_MAX_MTU; + netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); - set_bit(HNS3_NIC_STATE_INITED, &priv->state); + hns3_state_init(handle); + + ret = register_netdev(netdev); + if (ret) { + dev_err(priv->dev, "probe register netdev fail!\n"); + goto out_reg_netdev_fail; + } if (netif_msg_drv(handle)) hns3_info_show(priv); return ret; +out_reg_netdev_fail: + hns3_state_uninit(handle); + 
hns3_dbg_uninit(handle); + hns3_client_stop(handle); out_client_start: hns3_free_rx_cpu_rmap(netdev); hns3_nic_uninit_irq(priv); out_init_irq_fail: - unregister_netdev(netdev); -out_reg_netdev_fail: hns3_uninit_phy(netdev); out_init_phy: hns3_uninit_all_ring(priv); @@ -4095,7 +5497,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); @@ -4113,23 +5514,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_nic_uninit_irq(priv); - hns3_del_all_fd_rules(netdev, true); - hns3_clear_all_ring(handle, true); hns3_nic_uninit_vector_data(priv); hns3_nic_dealloc_vector_data(priv); - ret = hns3_uninit_all_ring(priv); - if (ret) - netdev_err(netdev, "uninit ring error\n"); + hns3_uninit_all_ring(priv); hns3_put_ring_config(priv); - hns3_dbg_uninit(handle); - out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } @@ -4141,8 +5537,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); if (netif_msg_link(handle)) netdev_info(netdev, "link up\n"); } else { @@ -4153,27 +5549,15 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) } } -static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct net_device *ndev = kinfo->netdev; - - if (tc > HNAE3_MAX_TC) - return -EINVAL; - - if (!ndev) - return -ENODEV; - - return hns3_nic_set_real_num_queue(ndev); -} - static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; - hns3_free_buffer_detach(ring, ring->next_to_clean); + hns3_free_buffer_detach(ring, ring->next_to_clean, 0); ring_ptr_move_fw(ring, next_to_clean); } + + ring->pending_buf = 0; } static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) @@ -4187,11 +5571,9 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) * stack, so we need to replace the buffer here. */ if (!ring->desc_cb[ring->next_to_use].reuse_flag) { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); /* if alloc new buffer fail, exit directly * and reclear in up flow. 
*/ @@ -4263,11 +5645,11 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) int i, j; int ret; - for (i = 0; i < h->kinfo.num_tqps; i++) { - ret = h->ae_algo->ops->reset_queue(h, i); - if (ret) - return ret; + ret = h->ae_algo->ops->reset_queue(h); + if (ret) + return ret; + for (i = 0; i < h->kinfo.num_tqps; i++) { hns3_init_ring_hw(&priv->ring[i]); /* We need to clear tx ring here because self test will @@ -4276,6 +5658,7 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) hns3_clear_tx_ring(&priv->ring[i]); priv->ring[i].next_to_clean = 0; priv->ring[i].next_to_use = 0; + priv->ring[i].last_to_use = 0; rx_ring = &priv->ring[i + h->kinfo.num_tqps]; hns3_init_ring_hw(rx_ring); @@ -4298,31 +5681,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) return 0; } -static void hns3_store_coal(struct hns3_nic_priv *priv) -{ - /* ethtool only support setting and querying one coal - * configuration for now, so save the vector 0' coal - * configuration here in order to restore it. - */ - memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, - sizeof(struct hns3_enet_coalesce)); - memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, - sizeof(struct hns3_enet_coalesce)); -} - -static void hns3_restore_coal(struct hns3_nic_priv *priv) -{ - u16 vector_num = priv->vector_num; - int i; - - for (i = 0; i < vector_num; i++) { - memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, - sizeof(struct hns3_enet_coalesce)); - memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, - sizeof(struct hns3_enet_coalesce)); - } -} - static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -4344,6 +5702,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); int ret = 0; + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_err(kinfo->netdev, "device is not initialized yet\n"); + return -EFAULT; + } + clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); if (netif_running(kinfo->netdev)) { @@ -4376,8 +5739,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_put_ring; - hns3_restore_coal(priv); - ret = hns3_nic_init_vector_data(priv); if (ret) goto err_dealloc_vector; @@ -4386,6 +5747,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); + /* the device can work without cpu rmap, only aRFS needs it */ ret = hns3_set_rx_cpu_rmap(netdev); if (ret) @@ -4430,7 +5793,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; + + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + hns3_nic_net_stop(netdev); if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); @@ -4444,21 +5809,17 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) hns3_nic_uninit_vector_data(priv); - hns3_store_coal(priv); - hns3_nic_dealloc_vector_data(priv); - ret = hns3_uninit_all_ring(priv); - if (ret) - netdev_err(netdev, "uninit ring error\n"); + hns3_uninit_all_ring(priv); hns3_put_ring_config(priv); - return ret; + return 0; } -static int hns3_reset_notify(struct hnae3_handle *handle, - enum hnae3_reset_notify_type type) +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) 
{ int ret = 0; @@ -4522,6 +5883,12 @@ int hns3_set_channels(struct net_device *netdev, if (ch->rx_count || ch->tx_count) return -EINVAL; + if (kinfo->tc_info.mqprio_active) { + dev_err(&netdev->dev, + "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); + return -EINVAL; + } + if (new_tqp_num > hns3_get_max_available_channels(h) || new_tqp_num < 1) { dev_err(&netdev->dev, @@ -4565,6 +5932,58 @@ int hns3_set_channels(struct net_device *netdev, return 0; } +void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (!if_running) + return; + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + netif_carrier_off(ndev); + netif_tx_disable(ndev); + + hns3_disable_irqs_and_tqps(ndev); + + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet + * during reset process, because driver may not be able + * to disable the ring through firmware when downing the netdev. + */ + if (!hns3_nic_resetting(ndev)) + hns3_nic_reset_all_ring(priv->ae_handle); + + hns3_reset_tx_queue(priv->ae_handle); +} + +void hns3_external_lb_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (!if_running) + return; + + if (hns3_nic_resetting(ndev)) + return; + + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + if (hns3_nic_reset_all_ring(priv->ae_handle)) + return; + + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + + hns3_enable_irqs_and_tqps(ndev); + + netif_tx_wake_all_queues(ndev); + + if (h->ae_algo->ops->get_status(h)) + netif_carrier_on(ndev); +} + static const struct hns3_hw_error_info hns3_hw_err[] = { { .type = HNAE3_PPU_POISON_ERROR, .msg = "PPU poison" }, @@ -4572,12 +5991,14 @@ static const struct hns3_hw_error_info hns3_hw_err[] = { .msg = "IMP CMDQ error" }, { .type = HNAE3_IMP_RD_POISON_ERROR, .msg = "IMP RD poison" }, + { .type = HNAE3_ROCEE_AXI_RESP_ERROR, + .msg = "ROCEE AXI RESP error" }, }; static void hns3_process_hw_error(struct hnae3_handle *handle, enum hnae3_hw_error_type type) { - int i; + u32 i; for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { if (hns3_hw_err[i].type == type) { @@ -4592,7 +6013,6 @@ static const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, .link_status_change = hns3_link_status_change, - .setup_tc = hns3_client_setup_tc, .reset_notify = hns3_reset_notify, .process_hw_error = hns3_process_hw_error, }; @@ -4605,8 +6025,8 @@ static int __init hns3_init_module(void) { int ret; - pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); - pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright); client.type = HNAE3_CLIENT_KNIC; snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", @@ -4642,9 +6062,11 @@ module_init(hns3_init_module); */ static void __exit hns3_exit_module(void) { + hnae3_acquire_unload_lock(); pci_unregister_driver(&hns3_driver); hnae3_unregister_client(&client); hns3_dbg_unregister_debugfs(); + hnae3_release_unload_lock(); } module_exit(hns3_exit_module); |
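
The interrupt-coalescing hunks above drop the driver's hand-rolled hns3_get_new_flow_lvl()/hns3_get_new_int_gl() heuristic and move to the kernel's generic dynamic interrupt moderation (DIM) library: the poll path only feeds samples to net_dim(), and the actual register update happens later from dim.work. Below is a minimal sketch of that pattern, assuming a simplified foo_ring_group in place of hns3_enet_ring_group; the foo_* names and foo_write_rx_moderation() are hypothetical stand-ins (hns3 writes the result through hns3_set_vector_coalesce_rx_gl()/_ql()), and the net_dim() call takes the sample by reference, matching the hunks above.

#include <linux/types.h>
#include <linux/container_of.h>
#include <linux/workqueue.h>
#include <linux/dim.h>

/* Hypothetical, simplified stand-in for hns3_enet_ring_group. */
struct foo_ring_group {
	struct dim dim;		/* per-direction DIM state machine */
	u64 total_packets;	/* accumulated by the poll routine */
	u64 total_bytes;
	u16 event_cnt;		/* interrupt events seen on this vector */
};

/* Hypothetical register write; hns3 uses
 * hns3_set_vector_coalesce_rx_gl()/_ql() at this point.
 */
static void foo_write_rx_moderation(u16 gl_usec, u16 ql_pkts)
{
}

/* Poll path: feed one sample to the DIM library. net_dim() only runs the
 * state machine and may queue dim.work; it never touches hardware itself.
 */
static void foo_update_rx_coalesce(struct foo_ring_group *grp)
{
	struct dim_sample sample = {};

	dim_update_sample(grp->event_cnt, grp->total_packets,
			  grp->total_bytes, &sample);
	net_dim(&grp->dim, &sample);
}

/* Work handler: look up the profile DIM selected, program it, rearm DIM. */
static void foo_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	foo_write_rx_moderation(moder.usec, moder.pkts);
	dim->state = DIM_START_MEASURE;
}

static void foo_init_rx_dim(struct foo_ring_group *grp)
{
	INIT_WORK(&grp->dim.work, foo_rx_dim_work);
	grp->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

This split is why hns3_rx_dim_work()/hns3_tx_dim_work() above end by resetting dim->state to DIM_START_MEASURE: hardware is only reprogrammed in process context after DIM has picked a new profile, instead of recomputing GL values inline every second as the removed code did.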

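
Similarly, the RX buffer hunks above (hns3_alloc_page_pool(), skb_mark_for_recycle(), page_pool_put_full_page()) move the receive path onto the page_pool allocator when the page_pool_enabled module parameter is set. A minimal sketch of that setup, assuming order-0 buffers and hypothetical foo_* names; the real driver derives .order, .pool_size and .max_len from hns3_page_order()/hns3_buf_size():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>

static struct page_pool *foo_create_rx_page_pool(struct device *dev,
						 unsigned int desc_num)
{
	struct page_pool_params pp = {
		/* let the pool own DMA mapping and RX-direction syncing */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= desc_num,	/* one page per descriptor */
		.nid		= dev_to_node(dev),
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 0,
		.max_len	= PAGE_SIZE,	/* bytes the device may write */
	};
	struct page_pool *pool = page_pool_create(&pp);

	/* treat failure as non-fatal, as hns3 does, and let the caller
	 * fall back to the legacy page_frag allocation path
	 */
	return IS_ERR(pool) ? NULL : pool;
}

On the completion side the pattern is the one visible in hns3_alloc_skb()/hns3_add_frag(): pages that end up attached to an skb are tagged with skb_mark_for_recycle() so that freeing the skb returns them to the pool, while pages dropped before reaching an skb go back via page_pool_put_full_page(); the pool itself is torn down with page_pool_destroy() in hns3_fini_ring().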