author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-27 13:04:52 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-27 13:04:52 -0800
commit		e0c38a4d1f196a4b17d2eba36afff8f656a4f1de (patch)
tree		b26a69fabef0160adb127416a9744217700feeb7 /drivers/net/ethernet/netronome/nfp/nfp_net_common.c
parent		7f9f852c75e7d776b078813586c76a2bc7dca993 (diff)
parent		90cadbbf341dd5b2df991c33a6bd6341f3a53788 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) New ipset extensions for matching on destination MAC addresses, from
Stefano Brivio.
2) Add ipv4 ttl and tos, plus ipv6 flow label and hop limit offloads to
nfp driver. From Stefano Brivio.
3) Implement GRO for plain UDP sockets, from Paolo Abeni.
4) Lots of work from Michał Mirosław to eliminate the VLAN_TAG_PRESENT
bit so that we could support the entire vlan_tci value.
5) Rework the IPSEC policy lookups to better optimize more usecases,
from Florian Westphal.
6) Infrastructure changes to eliminate direct manipulation of SKB lists
wherever possible, and to always use the appropriate SKB list helpers.
This work is still ongoing... (a short sketch of the helper style
follows this list).
7) Lots of PHY driver and state machine improvements and
simplifications, from Heiner Kallweit.
8) Various TSO deferral refinements, from Eric Dumazet.
9) Add ntuple filter support to aquantia driver, from Dmitry Bogdanov.
10) Batch dropping of XDP packets in tuntap, from Jason Wang.
11) Lots of cleanups and improvements to the r8169 driver from Heiner
Kallweit, including support for ->xmit_more. This driver has been
getting some much needed love since he started working on it.
12) Lots of new forwarding selftests from Petr Machata.
13) Enable VXLAN learning in mlxsw driver, from Ido Schimmel.
14) Packed ring support for virtio, from Tiwei Bie.
15) Add new Aquantia AQtion USB driver, from Dmitry Bezrukov.
16) Add XDP support to dpaa2-eth driver, from Ioana Ciocoi Radulescu.
17) Implement coalescing on TCP backlog queue, from Eric Dumazet.
18) Implement carrier change in tun driver, from Nicolas Dichtel.
19) Support msg_zerocopy in UDP, from Willem de Bruijn.
20) Significantly improve garbage collection of neighbor objects when
the table has many PERMANENT entries, from David Ahern.
21) Remove egdev usage from nfp and mlx5, and remove the facility
completely from the tree as it no longer has any users. From Oz
Shlomo and others.
22) Add a NETDEV_PRE_CHANGEADDR notifier event so that drivers can veto a
MAC address change and thereby abort the operation before the commit
phase (which is the NETDEV_CHANGEADDR event). From Petr Machata. (A
hedged notifier sketch follows this list.)
23) Add indirect call wrappers to avoid retpoline overhead, and use them
in the GRO code paths. From Paolo Abeni. (An illustrative use of the
wrappers is sketched after this list.)
24) Add support for netlink FDB get operations, from Roopa Prabhu.
25) Support bloom filter in mlxsw driver, from Nir Dotan.
26) Add SKB extension infrastructure. This consolidates the handling of
the auxiliary SKB data used by IPSEC and bridge netfilter, and is
designed to support the needs of MPTCP, which could be integrated in
the future. (A small skb_ext usage sketch follows this list.)
27) Lots of XDP TX optimizations in mlx5 from Tariq Toukan.
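
Sketch for item 6 (illustrative only, not code from this merge): the
point of the conversion is that sk_buff chains are only touched through
the long-standing sk_buff_head helpers in <linux/skbuff.h>, never via
skb->next/skb->prev directly. A minimal example:

#include <linux/skbuff.h>

/* Append an skb to a list using the helper instead of poking pointers. */
static void example_enqueue(struct sk_buff_head *queue, struct sk_buff *skb)
{
	__skb_queue_tail(queue, skb);	/* caller holds the queue's lock */
}

/* Drain the whole list through the helper API. */
static void example_drain(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	/* __skb_dequeue() unlinks the head; returns NULL when empty. */
	while ((skb = __skb_dequeue(queue)) != NULL)
		kfree_skb(skb);
}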
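
Sketch for item 22 (hedged: the handler and the address check are
illustrative; NETDEV_PRE_CHANGEADDR, the
netdev_notifier_pre_changeaddr_info payload and notifier_from_errno()
are taken from the series as merged). A netdevice notifier can reject
the new address in the pre phase, so NETDEV_CHANGEADDR is never emitted:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct netdev_notifier_pre_changeaddr_info *prei;

	switch (event) {
	case NETDEV_PRE_CHANGEADDR:
		prei = ptr;
		/* Veto before anything is committed: returning an errno
		 * here aborts the address change.
		 */
		if (!is_valid_ether_addr(prei->dev_addr))
			return notifier_from_errno(-EINVAL);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};
/* registered once with register_netdevice_notifier(&example_nb) */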
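
Sketch for item 23 (illustrative; the two handlers are stand-ins, the
INDIRECT_CALL_2() macro comes from <linux/indirect_call_wrapper.h>):
with retpolines enabled the macro compares the function pointer against
the listed candidates and calls the matching one directly, so the hot
path avoids a retpoline-protected indirect branch:

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>

static int handler_v6(struct sk_buff *skb);	/* stand-in callbacks */
static int handler_v4(struct sk_buff *skb);

static int example_deliver(int (*cb)(struct sk_buff *), struct sk_buff *skb)
{
	/* Tries cb == handler_v6, then cb == handler_v4, and only falls
	 * back to the real indirect call if neither matches.
	 */
	return INDIRECT_CALL_2(cb, handler_v6, handler_v4, skb);
}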
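
Sketch for item 26 (hedged: the extension id and state struct below are
hypothetical - real ids such as SKB_EXT_SEC_PATH are declared centrally
in enum skb_ext_id together with their sizes - while skb_ext_add() and
skb_ext_find() are the new helpers). A user attaches per-packet state
and looks it up later; the extension area is reference-counted and
shared across skb clones:

#include <linux/skbuff.h>

struct example_ext {		/* hypothetical per-packet state */
	u32 flow_cookie;
};

static int example_tag(struct sk_buff *skb, u32 cookie)
{
	struct example_ext *ext;

	ext = skb_ext_add(skb, SKB_EXT_EXAMPLE);	/* hypothetical id */
	if (!ext)
		return -ENOMEM;
	ext->flow_cookie = cookie;
	return 0;
}

static u32 example_lookup(const struct sk_buff *skb)
{
	struct example_ext *ext = skb_ext_find(skb, SKB_EXT_EXAMPLE);

	return ext ? ext->flow_cookie : 0;
}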
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1845 commits)
net: dccp: fix kernel crash on module load
drivers/net: appletalk/cops: remove redundant if statement and mask
bnx2x: Fix NULL pointer dereference in bnx2x_del_all_vlans() on some hw
net/net_namespace: Check the return value of register_pernet_subsys()
net/netlink_compat: Fix a missing check of nla_parse_nested
ieee802154: lowpan_header_create check must check daddr
net/mlx4_core: drop useless LIST_HEAD
mlxsw: spectrum: drop useless LIST_HEAD
net/mlx5e: drop useless LIST_HEAD
iptunnel: Set tun_flags in the iptunnel_metadata_reply from src
net/mlx5e: fix semicolon.cocci warnings
staging: octeon: fix build failure with XFRM enabled
net: Revert recent Spectre-v1 patches.
can: af_can: Fix Spectre v1 vulnerability
packet: validate address length if non-zero
nfc: af_nfc: Fix Spectre v1 vulnerability
phonet: af_phonet: Fix Spectre v1 vulnerability
net: core: Fix Spectre v1 vulnerability
net: minor cleanup in skb_ext_add()
net: drop the unused helper skb_ext_get()
...
Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/nfp_net_common.c')
-rw-r--r--	drivers/net/ethernet/netronome/nfp/nfp_net_common.c	113
1 file changed, 70 insertions(+), 43 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6bddfcfdec34..e97636d2e6ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -101,6 +101,7 @@ static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
 	/* ensure update is written before pinging HW */
 	nn_pci_flush(nn);
 	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+	nn->reconfig_in_progress_update = update;
 }

 /* Pass 0 as update to run posted reconfigs. */
@@ -123,10 +124,14 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
 	if (reg == 0)
 		return true;
 	if (reg & NFP_NET_CFG_UPDATE_ERR) {
-		nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+		nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+		       reg, nn->reconfig_in_progress_update,
+		       nn_readl(nn, NFP_NET_CFG_CTRL));
 		return true;
 	} else if (last_check) {
-		nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+		nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+		       reg, nn->reconfig_in_progress_update,
+		       nn_readl(nn, NFP_NET_CFG_CTRL));
 		return true;
 	}
@@ -279,7 +284,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
  *
  * Return: Negative errno on error, 0 on success
  */
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
 {
 	u32 mbox = nn->tlv_caps.mbox_off;
 	int ret;
@@ -647,27 +652,29 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
  * @txbuf: Pointer to driver soft TX descriptor
  * @txd: Pointer to HW TX descriptor
  * @skb: Pointer to SKB
+ * @md_bytes: Prepend length
  *
  * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
  * Return error on packet header greater than maximum supported LSO header size.
  */
 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
 			   struct nfp_net_tx_buf *txbuf,
-			   struct nfp_net_tx_desc *txd, struct sk_buff *skb)
+			   struct nfp_net_tx_desc *txd, struct sk_buff *skb,
+			   u32 md_bytes)
 {
-	u32 hdrlen;
+	u32 l3_offset, l4_offset, hdrlen;
 	u16 mss;

 	if (!skb_is_gso(skb))
 		return;

 	if (!skb->encapsulation) {
-		txd->l3_offset = skb_network_offset(skb);
-		txd->l4_offset = skb_transport_offset(skb);
+		l3_offset = skb_network_offset(skb);
+		l4_offset = skb_transport_offset(skb);
 		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	} else {
-		txd->l3_offset = skb_inner_network_offset(skb);
-		txd->l4_offset = skb_inner_transport_offset(skb);
+		l3_offset = skb_inner_network_offset(skb);
+		l4_offset = skb_inner_transport_offset(skb);
 		hdrlen = skb_inner_transport_header(skb) - skb->data +
 			inner_tcp_hdrlen(skb);
 	}
@@ -676,7 +683,9 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
 	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

 	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
-	txd->lso_hdrlen = hdrlen;
+	txd->l3_offset = l3_offset - md_bytes;
+	txd->l4_offset = l4_offset - md_bytes;
+	txd->lso_hdrlen = hdrlen - md_bytes;
 	txd->mss = cpu_to_le16(mss);
 	txd->flags |= PCIE_DESC_TX_LSO;
@@ -786,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	const struct skb_frag_struct *frag;
-	struct nfp_net_tx_desc *txd, txdg;
 	int f, nr_frags, wr_idx, md_bytes;
 	struct nfp_net_tx_ring *tx_ring;
 	struct nfp_net_r_vector *r_vec;
 	struct nfp_net_tx_buf *txbuf;
+	struct nfp_net_tx_desc *txd;
 	struct netdev_queue *nd_q;
 	struct nfp_net_dp *dp;
 	dma_addr_t dma_addr;
@@ -801,13 +810,13 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 	qidx = skb_get_queue_mapping(skb);
 	tx_ring = &dp->tx_rings[qidx];
 	r_vec = tx_ring->r_vec;
-	nd_q = netdev_get_tx_queue(dp->netdev, qidx);

 	nr_frags = skb_shinfo(skb)->nr_frags;

 	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
 		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
 			   qidx, tx_ring->wr_p, tx_ring->rd_p);
+		nd_q = netdev_get_tx_queue(dp->netdev, qidx);
 		netif_tx_stop_queue(nd_q);
 		nfp_net_tx_xmit_more_flush(tx_ring);
 		u64_stats_update_begin(&r_vec->tx_sync);
@@ -851,7 +860,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 	txd->lso_hdrlen = 0;

 	/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
-	nfp_net_tx_tso(r_vec, txbuf, txd, skb);
+	nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
 	nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
 	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
 		txd->flags |= PCIE_DESC_TX_VLAN;
@@ -860,8 +869,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)

 	/* Gather DMA */
 	if (nr_frags > 0) {
+		__le64 second_half;
+
 		/* all descs must match except for in addr, length and eop */
-		txdg = *txd;
+		second_half = txd->vals8[1];

 		for (f = 0; f < nr_frags; f++) {
 			frag = &skb_shinfo(skb)->frags[f];
@@ -878,11 +889,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 			tx_ring->txbufs[wr_idx].fidx = f;

 			txd = &tx_ring->txds[wr_idx];
-			*txd = txdg;
 			txd->dma_len = cpu_to_le16(fsize);
 			nfp_desc_set_dma_addr(txd, dma_addr);
-			txd->offset_eop |=
-				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
+			txd->offset_eop = md_bytes |
+				((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
+			txd->vals8[1] = second_half;
 		}

 		u64_stats_update_begin(&r_vec->tx_sync);
@@ -890,16 +901,16 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 		u64_stats_update_end(&r_vec->tx_sync);
 	}

-	netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
 	skb_tx_timestamp(skb);

+	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
 	tx_ring->wr_p += nr_frags + 1;
 	if (nfp_net_tx_ring_should_stop(tx_ring))
 		nfp_net_tx_ring_stop(nd_q, tx_ring);

 	tx_ring->wr_ptr_add += nr_frags + 1;
-	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
 		nfp_net_tx_xmit_more_flush(tx_ring);

 	return NETDEV_TX_OK;
@@ -940,14 +951,10 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
 {
 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
 	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-	const struct skb_frag_struct *frag;
 	struct netdev_queue *nd_q;
 	u32 done_pkts = 0, done_bytes = 0;
-	struct sk_buff *skb;
-	int todo, nr_frags;
 	u32 qcp_rd_p;
-	int fidx;
-	int idx;
+	int todo;

 	if (tx_ring->wr_p == tx_ring->rd_p)
 		return;
@@ -961,26 +968,33 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
 	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

 	while (todo--) {
+		const struct skb_frag_struct *frag;
+		struct nfp_net_tx_buf *tx_buf;
+		struct sk_buff *skb;
+		int fidx, nr_frags;
+		int idx;
+
 		idx = D_IDX(tx_ring, tx_ring->rd_p++);
+		tx_buf = &tx_ring->txbufs[idx];

-		skb = tx_ring->txbufs[idx].skb;
+		skb = tx_buf->skb;
 		if (!skb)
 			continue;

 		nr_frags = skb_shinfo(skb)->nr_frags;
-		fidx = tx_ring->txbufs[idx].fidx;
+		fidx = tx_buf->fidx;

 		if (fidx == -1) {
 			/* unmap head */
-			dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
+			dma_unmap_single(dp->dev, tx_buf->dma_addr,
 					 skb_headlen(skb), DMA_TO_DEVICE);

-			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
-			done_bytes += tx_ring->txbufs[idx].real_len;
+			done_pkts += tx_buf->pkt_cnt;
+			done_bytes += tx_buf->real_len;
 		} else {
 			/* unmap fragment */
 			frag = &skb_shinfo(skb)->frags[fidx];
-			dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
+			dma_unmap_page(dp->dev, tx_buf->dma_addr,
 				       skb_frag_size(frag), DMA_TO_DEVICE);
 		}
@@ -988,9 +1002,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
 		if (fidx == nr_frags - 1)
 			napi_consume_skb(skb, budget);

-		tx_ring->txbufs[idx].dma_addr = 0;
-		tx_ring->txbufs[idx].skb = NULL;
-		tx_ring->txbufs[idx].fidx = -2;
+		tx_buf->dma_addr = 0;
+		tx_buf->skb = NULL;
+		tx_buf->fidx = -2;
 	}

 	tx_ring->qcp_rd_p = qcp_rd_p;
@@ -3275,7 +3289,10 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
 		hdrlen = skb_inner_transport_header(skb) - skb->data +
 			inner_tcp_hdrlen(skb);

-		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
+		/* Assume worst case scenario of having longest possible
+		 * metadata prepend - 8B
+		 */
+		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
 			features &= ~NETIF_F_GSO_MASK;
 	}
@@ -3560,6 +3577,7 @@ void nfp_net_info(struct nfp_net *nn)
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev:         PCI device
+ * @ctrl_bar:     PCI IOMEM with vNIC config memory
  * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
  * @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3588,12 @@ void nfp_net_info(struct nfp_net *nn)
  *
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
-			      unsigned int max_tx_rings,
-			      unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+	      unsigned int max_tx_rings, unsigned int max_rx_rings)
 {
 	struct nfp_net *nn;
+	int err;

 	if (needs_netdev) {
 		struct net_device *netdev;
@@ -3594,6 +3613,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 	}

 	nn->dp.dev = &pdev->dev;
+	nn->dp.ctrl_bar = ctrl_bar;
 	nn->pdev = pdev;

 	nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3636,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,

 	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);

+	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+				     &nn->tlv_caps);
+	if (err)
+		goto err_free_nn;
+
 	return nn;
+
+err_free_nn:
+	if (nn->dp.netdev)
+		free_netdev(nn->dp.netdev);
+	else
+		vfree(nn);
+	return ERR_PTR(err);
 }

 /**
@@ -3889,11 +3921,6 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
 	}

-	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
-				     &nn->tlv_caps);
-	if (err)
-		return err;
-
 	if (nn->dp.netdev)
 		nfp_net_netdev_init(nn);
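
Note on the nfp_net_tx() hunks above: the driver now defers its TX
doorbell through __netdev_tx_sent_queue(), which combines the BQL byte
accounting with the xmit_more check and returns true exactly when the
deferred doorbell must be flushed. A hedged, generic skeleton of the
same tail-of-xmit pattern (the doorbell helper is a stand-in, not an
nfp function):

#include <linux/netdevice.h>

static void example_ring_doorbell(struct net_device *dev);	/* stand-in */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *nd_q;

	/* ...hardware descriptors for skb have been written here... */

	nd_q = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* Returns true when xmit_more is not set, or when the queue just
	 * stopped - i.e. whenever the doorbell may no longer be deferred.
	 */
	if (__netdev_tx_sent_queue(nd_q, skb->len, skb->xmit_more))
		example_ring_doorbell(dev);

	return NETDEV_TX_OK;
}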