Diffstat (limited to 'drivers/net/ethernet/huawei/hinic/hinic_tx.c')
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c  244
1 file changed, 180 insertions(+), 64 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index e17bf33eba0c..9b60966736db 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
*/
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
@@ -54,7 +46,7 @@
#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
-#define MIN_SKB_LEN 17
+#define MIN_SKB_LEN 32
#define MAX_PAYLOAD_OFFSET 221
#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
@@ -82,7 +74,7 @@ enum hinic_offload_type {
* hinic_txq_clean_stats - Clean the statistics of specific queue
* @txq: Logical Tx Queue
**/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
@@ -92,6 +84,7 @@ void hinic_txq_clean_stats(struct hinic_txq *txq)
txq_stats->tx_busy = 0;
txq_stats->tx_wake = 0;
txq_stats->tx_dropped = 0;
+ txq_stats->big_frags_pkts = 0;
u64_stats_update_end(&txq_stats->syncp);
}
@@ -105,7 +98,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
@@ -113,8 +105,8 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
+ stats->big_frags_pkts = txq_stats->big_frags_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
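[Editor's note: the removed u64_stats_update_begin/end pair was a read-side misuse; the destination stats struct is a caller-private copy, so readers only need the fetch/retry loop. For contrast, the matching writer side, as this driver uses it elsewhere in this same patch:

    /* Writer side: only the queue owner updates the counters; the
     * begin/end bracket keeps 64-bit values from being torn when a
     * 32-bit reader runs the fetch/retry loop concurrently. */
    u64_stats_update_begin(&txq_stats->syncp);
    txq_stats->pkts++;
    txq_stats->bytes += skb->len;
    u64_stats_update_end(&txq_stats->syncp);
]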
/**
@@ -143,7 +135,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma_addr;
int i, j;
@@ -364,6 +356,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
enum hinic_l4_offload_type l4_offload;
u32 offset, l4_len, network_hdr_len;
enum hinic_l3_offload_type l3_type;
+ u32 tunnel_type = NOT_TUNNEL;
union hinic_l3 ip;
union hinic_l4 l4;
u8 l4_proto;
@@ -374,27 +367,56 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 l4_tunnel_len;
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
ip.hdr = skb_network_header(skb);
- if (ip.v4->version == 4)
+ if (ip.v4->version == 4) {
l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ip.v4->version == 6)
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ unsigned char *exthdr;
+ __be16 frag_off;
+
l3_type = IPV6_PKT;
- else
+ tunnel_type = TUNNEL_UDP_CSUM;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
l3_type = L3TYPE_UNKNOWN;
+ l4_proto = IPPROTO_RAW;
+ }
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
- l4_tunnel_len = skb_inner_network_offset(skb) -
- skb_transport_offset(skb);
-
- hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
- l4_tunnel_len);
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ tunnel_type = NOT_TUNNEL;
+ l4_tunnel_len = 0;
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ break;
+ default:
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
- ip.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- network_hdr_len = skb_inner_network_header_len(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
@@ -414,10 +436,20 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
return 1;
}
+static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
+ u16 vlan_tag, u16 vlan_pri)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+ HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
+}
+
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
u32 *queue_info)
{
enum hinic_offload_type offload = 0;
+ u16 vlan_tag;
int enabled;
enabled = offload_tso(task, queue_info, skb);
@@ -431,6 +463,13 @@ static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
return -EPROTONOSUPPORT;
}
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ vlan_tag = skb_vlan_tag_get(skb);
+ offload_vlan(task, queue_info, vlan_tag,
+ vlan_tag >> VLAN_PRIO_SHIFT);
+ offload |= TX_OFFLOAD_VLAN;
+ }
+
if (offload)
hinic_task_set_l2hdr(task, skb_network_offset(skb));
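[Editor's note: offload_vlan() above splits the 802.1Q tag into the fields the hardware expects; a small worked example of the TCI layout it relies on, with an illustrative value:

    /* TCI layout: bits 15:13 PCP (priority), bit 12 DEI, bits 11:0 VID.
     * VLAN_PRIO_SHIFT is 13, so the priority is the top three bits. */
    u16 tci = skb_vlan_tag_get(skb);   /* say 0x6064 */
    u16 pri = tci >> VLAN_PRIO_SHIFT;  /* 0x6064 >> 13 = 3 */
    u16 vid = tci & VLAN_VID_MASK;     /* 0x6064 & 0x0fff = 0x064 */
]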
@@ -449,6 +488,67 @@ static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
return 0;
}
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 prod_idx, q_id = skb->queue_mapping;
+ struct netdev_queue *netdev_txq;
+ int nr_sges, err = NETDEV_TX_OK;
+ struct hinic_sq_wqe *sq_wqe;
+ unsigned int wqe_size;
+ struct hinic_txq *txq;
+ struct hinic_qp *qp;
+
+ txq = &nic_dev->txqs[q_id];
+ qp = container_of(txq->sq, struct hinic_qp, sq);
+ nr_sges = skb_shinfo(skb)->nr_frags + 1;
+
+ err = tx_map_skb(nic_dev, skb, txq->sges);
+ if (err)
+ goto skb_error;
+
+ wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (!sq_wqe) {
+ netif_stop_subqueue(netdev, qp->q_id);
+
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (sq_wqe) {
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ goto process_sq_wqe;
+ }
+
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_busy++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ err = NETDEV_TX_BUSY;
+ wqe_size = 0;
+ goto flush_skbs;
+ }
+
+process_sq_wqe:
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+
+flush_skbs:
+ netdev_txq = netdev_get_tx_queue(netdev, q_id);
+ if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
+ hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+
+ return err;
+
+skb_error:
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.tx_dropped++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+
+ return NETDEV_TX_OK;
+}
+
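[Editor's note: the stop/re-check/wake sequence in hinic_lb_xmit_frame() handles a classic race: the completion path may free ring space between the failed WQE allocation and the queue stop. The generic shape of the pattern, with a hypothetical ring_get_desc() helper standing in for hinic_sq_get_wqe():

    /* Queue-full race: completions can run between the failed get and
     * the stop, so retry once after stopping before giving up. */
    desc = ring_get_desc(ring);                 /* hypothetical helper */
    if (!desc) {
            netif_stop_subqueue(netdev, q_id);
            desc = ring_get_desc(ring);         /* re-check after stop */
            if (desc)
                    netif_wake_subqueue(netdev, q_id);
            else
                    return NETDEV_TX_BUSY;      /* genuinely full */
    }
]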
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -473,6 +573,12 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ if (nr_sges > 17) {
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.big_frags_pkts++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ }
+
if (nr_sges > txq->max_sges) {
netdev_err(netdev, "Too many Tx sges\n");
goto skb_error;
@@ -508,7 +614,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
if (err)
@@ -518,7 +624,7 @@ process_sq_wqe:
flush_skbs:
netdev_txq = netdev_get_tx_queue(netdev, q_id);
- if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+ if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
return err;
@@ -553,7 +659,7 @@ static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
}
/**
- * free_all_rx_skbs - free all skbs in tx queue
+ * free_all_tx_skbs - free all skbs in tx queue
* @txq: tx queue
**/
static void free_all_tx_skbs(struct hinic_txq *txq)
@@ -606,9 +712,11 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
do {
hw_ci = HW_CONS_IDX(sq) & wq->mask;
+ dma_rmb();
+
/* Reading a WQEBB to get real WQE size and consumer index. */
sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
- if ((!sq_wqe) ||
+ if (!sq_wqe ||
(((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
break;
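[Editor's note: the added dma_rmb() orders the load of the device-written consumer index against the later loads of WQE memory; without it the CPU may speculatively read stale descriptor contents. The generic shape, with illustrative field names:

    idx = READ_ONCE(ring->hw_index);  /* index DMA-written by the NIC */
    dma_rmb();                        /* index load before payload loads */
    desc = &ring->descs[idx];         /* now safe to read the entry */
]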
@@ -638,8 +746,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
__netif_tx_lock(netdev_txq, smp_processor_id());
-
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ if (!netif_testing(nic_dev->netdev))
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
__netif_tx_unlock(netdev_txq);
@@ -655,27 +763,17 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
if (pkts < budget) {
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- sq->msix_entry,
- HINIC_MSIX_ENABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
return pkts;
}
return budget;
}
-static void tx_napi_add(struct hinic_txq *txq, int weight)
-{
- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
- napi_enable(&txq->napi);
-}
-
-static void tx_napi_del(struct hinic_txq *txq)
-{
- napi_disable(&txq->napi);
- netif_napi_del(&txq->napi);
-}
-
static irqreturn_t tx_irq(int irq, void *data)
{
struct hinic_txq *txq = data;
@@ -683,10 +781,11 @@ static irqreturn_t tx_irq(int irq, void *data)
nic_dev = netdev_priv(txq->netdev);
- /* Disable the interrupt until napi will be completed */
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- txq->sq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ /* Disable the interrupt until napi will be completed */
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
@@ -697,23 +796,43 @@ static irqreturn_t tx_irq(int irq, void *data)
static int tx_request_irq(struct hinic_txq *txq)
{
struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
+ struct hinic_msix_config interrupt_info = {0};
+ struct hinic_intr_coal_info *intr_coal = NULL;
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_sq *sq = txq->sq;
+ struct hinic_qp *qp;
int err;
- tx_napi_add(txq, nic_dev->tx_weight);
+ qp = container_of(sq, struct hinic_qp, sq);
+
+ netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
+ nic_dev->tx_weight);
hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
TX_IRQ_NO_RESEND_TIMER);
+ intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
+ interrupt_info.msix_index = sq->msix_entry;
+ interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+ interrupt_info.pending_cnt = intr_coal->pending_limt;
+ interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+ err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
+ if (err) {
+ netif_err(nic_dev, drv, txq->netdev,
+ "Failed to set TX interrupt coalescing attribute\n");
+ netif_napi_del(&txq->napi);
+ return err;
+ }
+
err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
if (err) {
dev_err(&pdev->dev, "Failed to request Tx irq\n");
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
return err;
}
@@ -725,7 +844,7 @@ static void tx_free_irq(struct hinic_txq *txq)
struct hinic_sq *sq = txq->sq;
free_irq(sq->irq, txq);
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
}
/**
@@ -742,8 +861,7 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- int err, irqname_len;
- size_t sges_size;
+ int err;
txq->netdev = netdev;
txq->sq = sq;
@@ -752,27 +870,25 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
}
- irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
- txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
+ txq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, "%s_txq%d",
+ netdev->name, qp->q_id);
if (!txq->irq_name) {
err = -ENOMEM;
goto err_alloc_irqname;
}
- sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);
-
err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
CI_UPDATE_NO_COALESC);
if (err)
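[Editor's note: the allocation hunks also carry a subtle hardening. devm_kcalloc() checks the count * size multiplication for overflow, which a bare devm_kzalloc(n * size) would silently wrap, and devm_kasprintf() collapses the old measure/allocate/format triple into one call; both tie the buffer's lifetime to the device, so no explicit free path is needed. A sketch of what devm_kasprintf() replaces:

    /* devm_kasprintf() folds the old three steps into one call:
     *   len = snprintf(NULL, 0, fmt, ...) + 1;     -- measure
     *   buf = devm_kzalloc(dev, len, GFP_KERNEL);  -- allocate
     *   sprintf(buf, fmt, ...);                    -- format
     * and the result is freed automatically at device teardown. */
    name = devm_kasprintf(dev, GFP_KERNEL, "%s_txq%d", netdev->name, q_id);
]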