Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 402
1 file changed, 317 insertions, 85 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index f4227517dc8e..f4fdbfba8667 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -1,19 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
+/* Marvell RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
+#include "cn10k.h"
+#include "cn10k_ipsec.h"
-#define DRV_NAME "octeontx2-nicvf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+#define DRV_NAME "rvu_nicvf"
+#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) },
{ }
};
@@ -63,10 +72,6 @@ static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
case MBOX_MSG_NIX_LF_ALLOC:
mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
break;
- case MBOX_MSG_NIX_TXSCH_ALLOC:
- mbox_handler_nix_txsch_alloc(vf,
- (struct nix_txsch_alloc_rsp *)msg);
- break;
case MBOX_MSG_NIX_BP_ENABLE:
mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
break;
@@ -86,23 +91,27 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (af_mbox->num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
+
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
+ if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ __otx2_mbox_reset(mbox, 0);
mdev->msgs_acked++;
}
-
- otx2_mbox_reset(mbox, 0);
}
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
@@ -127,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
+ rsp->hdr.pcifunc = req->pcifunc;
rsp->hdr.rc = 0;
err = otx2_mbox_up_handler_cgx_link_event(
vf, (struct cgx_link_info_msg *)req, rsp);
@@ -148,6 +157,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
+ u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@@ -155,12 +165,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (vf_mbox->up_num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -175,38 +187,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
+
/* Read latest mbox data */
smp_rmb();
- /* Check for PF => VF response messages */
- mbox = &vf->mbox.mbox;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
-
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
- }
- /* Check for PF => VF notification messages */
- mbox = &vf->mbox.mbox_up;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
+ BIT_ULL(0));
+ }
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.up_num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
+ BIT_ULL(0));
}
return IRQ_HANDLED;
@@ -218,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
/* Disable VF => PF mailbox IRQ */
otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+
+ if (is_cn20k(vf->pdev))
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1));
+
free_irq(vector, vf);
}
@@ -230,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Register mailbox interrupt handler */
irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
- err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
- otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1));
+
+ if (!is_cn20k(vf->pdev)) {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ } else {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name,
+ vf);
+ }
+
if (err) {
dev_err(vf->dev,
"RVUPF: IRQ registration failed for VFAF mbox irq\n");
@@ -242,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Enable mailbox interrupt for msgs coming from PF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
- otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(vf->pdev)) {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) |
+ BIT_ULL(2) | BIT_ULL(3));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3));
+ }
if (!probe_pf)
return 0;
@@ -270,12 +312,11 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
struct mbox *mbox = &vf->mbox;
if (vf->mbox_wq) {
- flush_workqueue(vf->mbox_wq);
destroy_workqueue(vf->mbox_wq);
vf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -289,22 +330,36 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
int err;
mbox->pfvf = vf;
- vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI |
- WQ_MEM_RECLAIM, 1);
+ vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!vf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e PF0) and this VF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
+ /* For cn20k platform, VF mailbox region is in dram aliased from AF
+ * VF MBOX ADDR, MBOX is a separate RVU block.
*/
- hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
- if (!hwbase) {
- dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
- err = -ENOMEM;
- goto exit;
+ if (is_cn20k(vf->pdev)) {
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX <<
+ RVU_FUNC_BLKADDR_SHIFT);
+ } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
+ } else {
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e PF0) and this VF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev,
+ PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
}
err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
@@ -327,6 +382,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
return 0;
exit:
+ if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap(hwbase);
destroy_workqueue(vf->mbox_wq);
return err;
}
@@ -342,7 +399,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
- if (is_otx2_lbkvf(vf->pdev)) {
+ if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -360,13 +417,23 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) {
+ dev_stats = &vf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, incase SQBs got freed up */
@@ -381,6 +448,42 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -391,7 +494,7 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2vf_open(netdev);
@@ -414,16 +517,39 @@ static void otx2vf_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ return otx2_handle_ntuple_tc_features(netdev, features);
+}
+
static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
+ .ndo_select_queue = otx2_select_queue,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_setup_tc = otx2_setup_tc,
+ .ndo_hwtstamp_get = otx2_config_hwtstamp_get,
+ .ndo_hwtstamp_set = otx2_config_hwtstamp_set,
};
+static int otx2_vf_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
+
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
struct otx2_hw *hw = &vf->hw;
@@ -448,10 +574,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int num_vec = pci_msix_vec_count(pdev);
struct device *dev = &pdev->dev;
+ int err, qcount, qos_txqs;
struct net_device *netdev;
struct otx2_nic *vf;
struct otx2_hw *hw;
- int err, qcount;
err = pcim_enable_device(pdev);
if (err) {
@@ -459,7 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -468,17 +594,16 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
qcount = num_online_cpus();
- netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
+ netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -494,6 +619,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->non_qos_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -523,6 +652,13 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
}
+ otx2_setup_dev_hw_settings(vf);
+
+ if (is_cn20k(vf->pdev))
+ cn20k_init(vf);
+ else
+ otx2_init_hw_ops(vf);
+
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -540,32 +676,45 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = otx2vf_realloc_msix_vectors(vf);
if (err)
- goto err_mbox_destroy;
+ goto err_detach_rsrc;
err = otx2_set_real_num_queues(netdev, qcount, qcount);
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(vf);
+ err = cn10k_lmtst_init(vf);
+ if (err)
+ goto err_detach_rsrc;
+
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(vf);
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
- /* MTU range: 68 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
-
- INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ netdev->max_mtu = otx2_get_max_mtu(vf);
+ hw->max_mtu = netdev->max_mtu;
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
@@ -577,21 +726,79 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ if (is_otx2_sdp_rep(vf->pdev)) {
+ int n;
+
+ n = vf->pcifunc & RVU_PFVF_FUNC_MASK;
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d",
+ pdev->bus->number, n);
+ }
+
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ipsec_clean;
}
+ err = otx2_vf_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2vf_set_ethtool_ops(netdev);
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ err = otx2vf_mcam_flow_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_init_tc(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
+#ifdef CONFIG_DCB
+ /* Priority flow control is not supported for LBK and SDP vf(s) */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
+#endif
+ otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
+err_unreg_netdev:
+ unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -602,8 +809,6 @@ err_free_irq_vectors:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -617,15 +822,42 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
- otx2vf_disable_mbox_intr(vf);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
+ cancel_work_sync(&vf->reset_task);
+ otx2_unregister_dl(vf);
+ unregister_netdev(netdev);
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
+ otx2_ptp_destroy(vf);
+ otx2_mcam_flow_del(vf);
+ otx2_shutdown_tc(vf);
+ otx2_shutdown_qos(vf);
otx2_detach_resources(&vf->mbox);
+ otx2vf_disable_mbox_intr(vf);
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {