author    Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 17:29:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 17:29:33 -0700
commit    29d9f30d4ce6c7a38745a54a8cddface10013490 (patch)
tree      85649ba6a7b39203584d8db9365e03f64e62c136 /drivers/net/ethernet/marvell/octeontx2/nic
parent    56a451b780676bc1cdac011735fe2869fa2e9abf (diff)
parent    7f80ccfe996871ca69648efee74a60ae7ad0dcd9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Fix the iwlwifi regression, from Johannes Berg.
  2) Support BSS coloring and 802.11 encapsulation offloading in hardware, from John Crispin.
  3) Fix some potential Spectre issues in qtnfmac, from Sergey Matyukevich.
  4) Add TTL decrement action to openvswitch, from Matteo Croce.
  5) Allow parallelization through flow_action setup by not taking the RTNL mutex, from Vlad Buslov.
  6) A lot of zero-length array to flexible-array conversions, from Gustavo A. R. Silva.
  7) Align XDP statistics names across several drivers for consistency, from Lorenzo Bianconi.
  8) Add various pieces of infrastructure for offloading conntrack, and make use of it in mlx5 driver, from Paul Blakey.
  9) Allow using listening sockets in BPF sockmap, from Jakub Sitnicki.
 10) Lots of parallelization improvements during configuration changes in mlxsw driver, from Ido Schimmel.
 11) Add support to devlink for generic packet traps, which report packets dropped during ACL processing. And use them in mlxsw driver. From Jiri Pirko.
 12) Support bcmgenet on ACPI, from Jeremy Linton.
 13) Make BPF compatible with RT, from Thomas Gleixner, Alexei Starovoitov, and yours truly.
 14) Support XDP meta-data in virtio_net, from Yuya Kusakabe.
 15) Fix sysfs permissions when network devices change namespaces, from Christian Brauner.
 16) Add a flags element to ethtool_ops so that drivers can more simply indicate which coalescing parameters they actually support, and therefore the generic layer can validate the user's ethtool request. Use this in all drivers, from Jakub Kicinski.
 17) Offload FIFO qdisc in mlxsw, from Petr Machata.
 18) Support UDP sockets in sockmap, from Lorenz Bauer.
 19) Fix stretch ACK bugs in several TCP congestion control modules, from Pengcheng Yang.
 20) Support virtual functions in octeontx2 driver, from Tomasz Duszynski.
 21) Add region operations for devlink and use it in ice driver to dump NVM contents, from Jacob Keller.
 22) Add support for hw offload of MACSEC, from Antoine Tenart.
 23) Add support for BPF programs that can be attached to LSM hooks, from KP Singh.
 24) Support for multiple paths, path managers, and counters in MPTCP. From Peter Krystad, Paolo Abeni, Florian Westphal, Davide Caratti, and others.
 25) More progress on adding the netlink interface to ethtool, from Michal Kubecek"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2121 commits)
  net: ipv6: rpl_iptunnel: Fix potential memory leak in rpl_do_srh_inline
  cxgb4/chcr: nic-tls stats in ethtool
  net: dsa: fix oops while probing Marvell DSA switches
  net/bpfilter: remove superfluous testing message
  net: macb: Fix handling of fixed-link node
  net: dsa: ksz: Select KSZ protocol tag
  netdevsim: dev: Fix memory leak in nsim_dev_take_snapshot_write
  net: stmmac: add EHL 2.5Gbps PCI info and PCI ID
  net: stmmac: add EHL PSE0 & PSE1 1Gbps PCI info and PCI ID
  net: stmmac: create dwmac-intel.c to contain all Intel platform
  net: dsa: bcm_sf2: Support specifying VLAN tag egress rule
  net: dsa: bcm_sf2: Add support for matching VLAN TCI
  net: dsa: bcm_sf2: Move writing of CFP_DATA(5) into slicing functions
  net: dsa: bcm_sf2: Check earlier for FLOW_EXT and FLOW_MAC_EXT
  net: dsa: bcm_sf2: Disable learning for ASP port
  net: dsa: b53: Deny enslaving port 7 for 7278 into a bridge
  net: dsa: b53: Prevent tagged VLAN on port 7 for 7278
  net: dsa: b53: Restore VLAN entries upon (re)configuration
  net: dsa: bcm_sf2: Fix overflow checks
  hv_netvsc: Remove unnecessary round_up for recv_completion_cnt
  ...
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile       |   2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c  | 163
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h  |  49
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 182
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c      | 861
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h     |  13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c    |  42
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c      | 648
8 files changed, 1842 insertions, 118 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 41bf00cf5b1d..778df331c8ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -4,7 +4,9 @@
#
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
+obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b945bd3d5d88..f1d2dea90a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -49,15 +49,15 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
if (!netif_running(pfvf->netdev))
return;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return;
}
otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
}
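This first hunk shows a conversion repeated throughout the patch: the otx2_mbox_lock()/otx2_mbox_unlock() wrappers (deleted from otx2_common.h further down) become direct mutex_lock()/mutex_unlock() calls on mbox.lock, and every early-return path must now release the mutex explicitly. A minimal sketch of the resulting request pattern (otx2_send_simple_req() is a hypothetical name; the alloc/sync helpers are the ones used above):

    static int otx2_send_simple_req(struct otx2_nic *pfvf)
    {
            struct msg_req *req;
            int err;

            mutex_lock(&pfvf->mbox.lock);
            req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
            if (!req) {
                    /* Unlock on the error path too */
                    mutex_unlock(&pfvf->mbox.lock);
                    return -ENOMEM;
            }
            err = otx2_sync_mbox_msg(&pfvf->mbox);
            mutex_unlock(&pfvf->mbox.lock);
            return err;
    }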
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
@@ -128,6 +128,7 @@ void otx2_get_stats64(struct net_device *netdev,
stats->tx_packets = dev_stats->tx_frames;
stats->tx_dropped = dev_stats->tx_drops;
}
+EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
@@ -135,17 +136,17 @@ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
struct nix_set_mac_addr *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -157,27 +158,27 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
struct msg_req *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(msghdr)) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return 0;
}
@@ -197,26 +198,50 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* SMQ config limits maximum pkt size that can be transmitted */
- req->update_smq = true;
pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_config_pause_frm(struct otx2_nic *pfvf)
+{
+ struct cgx_pause_frm_cfg *req;
+ int err;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
+ req->set = 1;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
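otx2_config_pause_frm() only pushes whatever pfvf->flags already records to the CGX block, so callers toggle the flag bits first and then invoke it. A sketch of the expected usage, mirroring what otx2_set_pauseparam() does in the ethtool changes below:

    /* Enable Tx pause only, then sync the setting to hardware */
    pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
    pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
    err = otx2_config_pause_frm(pfvf);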
@@ -226,10 +251,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
struct nix_rss_flowkey_cfg *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->mcam_index = -1; /* Default or reserved index */
@@ -237,7 +262,7 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
req->group = DEFAULT_RSS_CONTEXT_GROUP;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -248,7 +273,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
struct nix_aq_enq_req *aq;
int idx, err;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -258,12 +283,12 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
*/
err = otx2_sync_mbox_msg(mbox);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return -ENOMEM;
}
}
@@ -276,7 +301,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
aq->op = NIX_AQ_INSTOP_INIT;
}
err = otx2_sync_mbox_msg(mbox);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
@@ -394,6 +419,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
schedule_work(&pfvf->reset_task);
}
+EXPORT_SYMBOL(otx2_tx_timeout);
void otx2_get_mac_from_af(struct net_device *netdev)
{
@@ -408,6 +434,7 @@ void otx2_get_mac_from_af(struct net_device *netdev)
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
}
+EXPORT_SYMBOL(otx2_get_mac_from_af);
static int otx2_get_link(struct otx2_nic *pfvf)
{
@@ -443,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
+ req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
@@ -529,17 +556,17 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
struct nix_txsch_free_req *free_req;
int lvl, schq, err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
/* Free the transmit schedulers */
free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
if (!free_req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
free_req->flags = TXSCHQ_FREE_ALL;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -553,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
u64 incr, *ptr, val;
+ int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
incr = (u64)qidx << 32;
- while (1) {
+ while (timeout) {
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F;
if (sqe_head == sqe_tail)
break;
usleep_range(1, 3);
+ timeout--;
}
}
}
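This change bounds what was previously an unbounded busy-wait: if the SQ never drains, the poll now gives up after roughly 1000 iterations of usleep_range(1, 3). The head/tail decode assumes the NIX_LF_SQ_OP_STATUS layout implied by the shifts:

    /* Assumed NIX_LF_SQ_OP_STATUS layout, per the shifts above:
     *   val[25:20] = SQE head pointer
     *   val[33:28] = SQE tail pointer
     * The SQ is considered flushed once head == tail.
     */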
@@ -580,6 +609,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
* RED accepts pkts if free pointers > 102 & <= 205.
* Drops pkts if free pointers < 102.
*/
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
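Working the integer arithmetic through (a sanity check, not part of the patch), the 256-level aura thresholds come out as:

    RQ_BP_LVL_AURA   = 255 - ((85 * 256) / 100) = 255 - 217 = 38
    RQ_PASS_LVL_AURA = 255 - ((95 * 256) / 100) = 255 - 243 = 12
    RQ_DROP_LVL_AURA = 255 - ((99 * 256) / 100) = 255 - 253 = 2

so backpressure asserts first, then RED marking, then drops, as free buffer pointers run out.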
@@ -741,6 +771,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (qidx < pfvf->hw.rx_queues) {
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
+
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
+
+ /* Set backpressure level to be the same as the cq pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
/* Fill AQ info */
@@ -951,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->fc_addr);
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
+ pfvf->qset.pool = NULL;
}
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
@@ -996,6 +1034,14 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_addr = pool->fc_addr->iova;
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt) {
+ aq->aura.bp_ena = 0;
+ aq->aura.nix0_bpid = pfvf->bpid[0];
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
/* Fill AQ info */
aq->ctype = NPA_AQ_CTYPE_AURA;
aq->op = NPA_AQ_INSTOP_INIT;
@@ -1210,10 +1256,10 @@ int otx2_detach_resources(struct mbox *mbox)
{
struct rsrc_detach *detach;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
detach = otx2_mbox_alloc_msg_detach_resources(mbox);
if (!detach) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return -ENOMEM;
}
@@ -1222,9 +1268,10 @@ int otx2_detach_resources(struct mbox *mbox)
/* Send detach request to AF */
otx2_mbox_msg_send(&mbox->mbox, 0);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return 0;
}
+EXPORT_SYMBOL(otx2_detach_resources);
int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
@@ -1232,11 +1279,11 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
struct msg_req *msix;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
/* Get memory to put this msg */
attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
if (!attach) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
@@ -1246,7 +1293,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Send attach request to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -1261,16 +1308,16 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Get NPA and NIX MSIX vector offsets */
msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
if (!msix) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
@@ -1281,12 +1328,13 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_attach_npa_nix);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
struct hwctx_disable_req *req;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Request AQ to disable this context */
if (npa)
req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
@@ -1294,7 +1342,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
if (!req) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return;
}
@@ -1304,7 +1352,26 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
__func__);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
+}
+
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
}
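Unlike the other helpers in this file, otx2_nix_config_bp() does not take the mbox mutex itself; both call sites added later in this patch (otx2_init_hw_resources() and otx2_free_hw_resources()) invoke it with the lock already held:

    mutex_lock(&pf->mbox.lock);
    otx2_nix_config_bp(pf, true);   /* enable backpressure */
    mutex_unlock(&pf->mbox.lock);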
/* Mbox message handlers */
@@ -1330,6 +1397,7 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
}
+EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
struct npa_lf_alloc_rsp *rsp)
@@ -1337,6 +1405,7 @@ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
+EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
struct nix_lf_alloc_rsp *rsp)
@@ -1347,6 +1416,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}
+EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
struct msix_offset_rsp *rsp)
@@ -1354,6 +1424,19 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
pfvf->hw.npa_msixoff = rsp->npa_msixoff;
pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
+EXPORT_SYMBOL(mbox_handler_msix_offset);
+
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int chan, chan_id;
+
+ for (chan = 0; chan < rsp->chan_cnt; chan++) {
+ chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
+ pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
+ }
+}
+EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
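The handler assumes each chan_bpid[] entry packs the channel id in bits [16:10] and the backpressure id in bits [9:0]. A worked example with a hypothetical value:

    /* rsp->chan_bpid[chan] == 0x0C05:
     *   chan_id = (0x0C05 >> 10) & 0x7F  = 3
     *   bpid    =  0x0C05        & 0x3FF = 5
     * so pfvf->bpid[3] is set to 5.
     */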
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 320f3b7bf57f..018c283a0ac4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -20,6 +20,8 @@
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
@@ -191,6 +193,17 @@ struct otx2_hw {
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
};
+struct otx2_vf_config {
+ struct otx2_nic *pf;
+ struct delayed_work link_event_work;
+ bool intf_down; /* true if interface is down (not yet configured) */
+};
+
+struct flr_work {
+ struct work_struct work;
+ struct otx2_nic *pf;
+};
+
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
@@ -204,6 +217,8 @@ struct otx2_nic {
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
+#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
struct otx2_qset qset;
@@ -213,14 +228,23 @@ struct otx2_nic {
/* Mbox */
struct mbox mbox;
+ struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
+ struct workqueue_struct *mbox_pfvf_wq;
+ u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
+ struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo;
u64 reset_count;
struct work_struct reset_task;
+ struct workqueue_struct *flr_wq;
+ struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
+ struct workqueue_struct *otx2_wq;
+ struct work_struct rx_mode_work;
/* Ethtool stuff */
u32 msg_enable;
@@ -229,6 +253,11 @@ struct otx2_nic {
int nix_blkaddr;
};
+static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+}
+
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
return (pdev->revision == 0x00) &&
@@ -348,21 +377,6 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
-static inline void otx2_mbox_lock_init(struct mbox *mbox)
-{
- mutex_init(&mbox->lock);
-}
-
-static inline void otx2_mbox_lock(struct mbox *mbox)
-{
- mutex_lock(&mbox->lock);
-}
-
-static inline void otx2_mbox_unlock(struct mbox *mbox)
-{
- mutex_unlock(&mbox->lock);
-}
-
/* With the absence of API for 128-bit IO memory access for arm64,
* implement required operations at place.
*/
@@ -558,6 +572,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+int otx2_config_pause_frm(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -578,6 +593,7 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
@@ -598,6 +614,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
@@ -607,6 +625,7 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
+void otx2vf_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 60fcf82dd8cb..d59f5a9c7273 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#define DRV_NAME "octeontx2-nicpf"
+#define DRV_VF_NAME "octeontx2-nicvf"
struct otx2_stat {
char name[ETH_GSTRING_LEN];
@@ -63,16 +64,6 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
-static void otx2_dev_open(struct net_device *netdev)
-{
- otx2_open(netdev);
-}
-
-static void otx2_dev_stop(struct net_device *netdev)
-{
- otx2_stop(netdev);
-}
-
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -232,7 +223,7 @@ static int otx2_set_channels(struct net_device *dev,
return -EINVAL;
if (if_up)
- otx2_dev_stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
@@ -245,7 +236,7 @@ static int otx2_set_channels(struct net_device *dev,
fail:
if (if_up)
- otx2_dev_open(dev);
+ dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -253,6 +244,51 @@ fail:
return err;
}
+static void otx2_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return;
+
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req)
+ return;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
+}
+
+static int otx2_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+
+ if (pause->tx_pause)
+ pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return otx2_config_pause_frm(pfvf);
+}
+
static void otx2_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -297,14 +333,15 @@ static int otx2_set_ringparam(struct net_device *netdev,
return 0;
if (if_up)
- otx2_dev_stop(netdev);
+ netdev->netdev_ops->ndo_stop(netdev);
/* Assigned to the nearest possible exponent. */
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
if (if_up)
- otx2_dev_open(netdev);
+ netdev->netdev_ops->ndo_open(netdev);
+
return 0;
}
@@ -329,17 +366,6 @@ static int otx2_set_coalesce(struct net_device *netdev,
struct otx2_hw *hw = &pfvf->hw;
int qidx;
- if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
- ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
- return -EOPNOTSUPP;
-
if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
return 0;
@@ -631,10 +657,15 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ /* LBK link is internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 1;
return pfvf->linfo.link_up;
}
static const struct ethtool_ops otx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -654,9 +685,110 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
};
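Declaring supported_coalesce_params replaces the hand-rolled parameter screening deleted from otx2_set_coalesce() above: the ethtool core now rejects any request that touches fields other than usecs and max-frames before the driver callback runs. For example (a behavioral note, not from the patch):

    /* With only USECS | MAX_FRAMES declared, a request such as
     * "ethtool -C eth0 adaptive-rx on" is refused by the ethtool
     * core with -EOPNOTSUPP; otx2_set_coalesce() is never called.
     */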
void otx2_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &otx2_ethtool_ops;
}
+
+/* VF's ethtool APIs */
+static void otx2vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(vf, &data, 0);
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(vf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&vf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(vf, stats, &data);
+ *(data++) = vf->reset_count;
+}
+
+static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (vf->hw.rx_queues + vf->hw.tx_queues);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
+}
+
+static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2vf_get_drvinfo,
+ .get_strings = otx2vf_get_strings,
+ .get_ethtool_stats = otx2vf_get_ethtool_stats,
+ .get_sset_count = otx2vf_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
+};
+
+void otx2vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2vf_ethtool_ops;
+}
+EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 85f9b9ba6bd5..411e5ea1031e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -24,7 +24,6 @@
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
-#define DRV_VERSION "1.0"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -32,10 +31,9 @@ static const struct pci_device_id otx2_pf_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
enum {
@@ -61,6 +59,224 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
+static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
+{
+ int irq, vfs = pf->total_vfs;
+
+ /* Disable VFs ME interrupts */
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(irq, pf);
+
+ /* Disable VFs FLR interrupts */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(irq, pf);
+
+ if (vfs <= 64)
+ return;
+
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(irq, pf);
+
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(irq, pf);
+}
+
+static void otx2_flr_wq_destroy(struct otx2_nic *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ devm_kfree(pf->dev, pf->flr_wrk);
+}
+
+static void otx2_flr_handler(struct work_struct *work)
+{
+ struct flr_work *flrwork = container_of(work, struct flr_work, work);
+ struct otx2_nic *pf = flrwork->pf;
+ struct mbox *mbox = &pf->mbox;
+ struct msg_req *req;
+ int vf, reg = 0;
+
+ vf = flrwork - pf->flr_wrk;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_vf_flr(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+ req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ if (!otx2_sync_mbox_msg(&pf->mbox)) {
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* clear transaction pending bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ }
+
+ mutex_unlock(&mbox->lock);
+}
+
+static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int reg, dev, vf, start_vf, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
+ /* Clear interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int vf, reg, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* clear trpend bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* clear interrupt */
+ otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int ret;
+
+ /* Register ME interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME0\n");
+ }
+
+ /* Register FLR interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR0\n");
+ return ret;
+ }
+
+ if (numvfs > 64) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFME1),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME1\n");
+ }
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR1\n");
+ return ret;
+ }
+ }
+
+ /* Enable ME interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ /* Enable FLR interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ if (numvfs > 64) {
+ numvfs -= 64;
+
+ otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+ return 0;
+}
+
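INTR_MASK() comes from the shared AF mbox headers; assuming its usual definition, it yields one bit per VF and saturates at 64:

    /* Assumed: #define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
     * e.g. INTR_MASK(3) == 0x7 (VFs 0-2) and INTR_MASK(64) == ~0ull,
     * which is why VF counts above 64 spill into the ...X(1) registers
     * with INTR_MASK(numvfs - 64).
     */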
+static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+{
+ int vf;
+
+ pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
+ WQ_UNBOUND | WQ_HIGHPRI, 1);
+ if (!pf->flr_wq)
+ return -ENOMEM;
+
+ pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
+ sizeof(struct flr_work), GFP_KERNEL);
+ if (!pf->flr_wrk) {
+ destroy_workqueue(pf->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ pf->flr_wrk[vf].pf = pf;
+ INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
+ }
+
+ return 0;
+}
+
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
int first, int mdevs, u64 intr, int type)
{
@@ -115,9 +331,391 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
}
}
+static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
+ struct otx2_mbox *pfvf_mbox, void *bbuf_base,
+ int devid)
+{
+ struct otx2_mbox_dev *src_mdev = mdev;
+ int offset;
+
+ /* Msgs are already copied, trigger VF's mbox irq */
+ smp_wmb();
+
+ offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
+ writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+
+ /* Restore VF's mbox bounce buffer region address */
+ src_mdev->mbase = bbuf_base;
+}
+
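The smp_wmb() is the producer half of the mailbox handoff: it orders the already-copied message payload ahead of the doorbell write, so the VF's interrupt handler can never observe the trigger before the data. In outline:

    /* 1. copy messages into the shared mbox region
     * 2. smp_wmb()          -- payload made visible first
     * 3. writeq(1, trigger) -- raise the peer's mbox interrupt
     */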
+static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
+ struct otx2_mbox *src_mbox,
+ int dir, int vf, int num_msgs)
+{
+ struct otx2_mbox_dev *src_mdev, *dst_mdev;
+ struct mbox_hdr *mbox_hdr;
+ struct mbox_hdr *req_hdr;
+ struct mbox *dst_mbox;
+ int dst_size, err;
+
+ if (dir == MBOX_DIR_PFAF) {
+ /* Set VF's mailbox memory as PF's bounce buffer memory, so
+ * that explicit copying of VF's msgs to PF=>AF mbox region
+ * and AF=>PF responses to VF's mbox region can be avoided.
+ */
+ src_mdev = &src_mbox->dev[vf];
+ mbox_hdr = src_mbox->hwbase +
+ src_mbox->rx_start + (vf * MBOX_SIZE);
+
+ dst_mbox = &pf->mbox;
+ dst_size = dst_mbox->mbox.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox.dev[0];
+
+ mutex_lock(&pf->mbox.lock);
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = num_msgs;
+ err = otx2_sync_mbox_msg(dst_mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to VF%d messages\n", vf);
+ /* restore PF mbase and exit */
+ dst_mdev->mbase = pf->mbox.bbuf_base;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+ /* At this point, all the VF messages sent to AF are acked
+ * with proper responses and responses are copied to VF
+ * mailbox, hence raise an interrupt to the VF.
+ */
+ req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
+ dst_mbox->mbox.rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
+ pf->mbox.bbuf_base, vf);
+ mutex_unlock(&pf->mbox.lock);
+ } else if (dir == MBOX_DIR_PFVF_UP) {
+ src_mdev = &src_mbox->dev[0];
+ mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
+ req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ dst_mbox = &pf->mbox_pfvf[0];
+ dst_size = dst_mbox->mbox_up.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox_up.dev[vf];
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = mbox_hdr->num_msgs;
+ err = otx2_sync_mbox_up_msg(dst_mbox, vf);
+ if (err) {
+ dev_warn(pf->dev,
+ "VF%d is not responding to mailbox\n", vf);
+ return err;
+ }
+ } else if (dir == MBOX_DIR_VFPF_UP) {
+ req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+ otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
+ &pf->mbox.mbox_up,
+ pf->mbox_pfvf[vf].bbuf_base,
+ 0);
+ }
+
+ return 0;
+}
+
+static void otx2_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct mbox_msghdr *msg = NULL;
+ int offset, vf_idx, id, err;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *pf;
+
+ vf_mbox = container_of(work, struct mbox, mbox_wrk);
+ pf = vf_mbox->pfvf;
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+
+ mbox = &pf->mbox_pfvf[0].mbox;
+ mdev = &mbox->dev[vf_idx];
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if (msg->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ /* Set VF's number in each of the msgs */
+ msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
+ offset = msg->next_msgoff;
+ }
+ err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
+ vf_mbox->num_msgs);
+ if (err)
+ goto inval_msg;
+ return;
+
+inval_msg:
+ otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
+ otx2_mbox_msg_send(mbox, vf_idx);
+}
+
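Before forwarding, each VF message is stamped with its true source in the low 10 bits (RVU_PFVF_FUNC_MASK) of pcifunc; func 0 is the PF itself, so VF index i maps to func i + 1. A worked example with a hypothetical VF:

    /* Message from VF index 3:
     *   msg->pcifunc &= RVU_PFVF_FUNC_MASK;            keep func bits
     *   msg->pcifunc |= (3 + 1) & RVU_PFVF_FUNC_MASK;  func = 4
     * The AF now attributes the request to VF3 rather than the PF.
     */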
+static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_nic *pf = vf_mbox->pfvf;
+ struct otx2_mbox_dev *mdev;
+ int offset, id, vf_idx = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+ mbox = &pf->mbox_pfvf[0].mbox_up;
+ mdev = &mbox->dev[vf_idx];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, vf_idx);
+}
+
+static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
+ int vfs = pf->total_vfs;
+ struct mbox *mbox;
+ u64 intr;
+
+ mbox = pf->mbox_pfvf;
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ TYPE_PFVF);
+ vfs -= 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+
+ return IRQ_HANDLED;
+}
+
+static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
+{
+ void __iomem *hwbase;
+ struct mbox *mbox;
+ int err, vf;
+ u64 base;
+
+ if (!numvfs)
+ return -EINVAL;
+
+ pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
+ sizeof(struct mbox), GFP_KERNEL);
+ if (!pf->mbox_pfvf)
+ return -ENOMEM;
+
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!pf->mbox_pfvf_wq)
+ return -ENOMEM;
+
+ base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
+
+ mbox = &pf->mbox_pfvf[0];
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF, numvfs);
+ if (err)
+ goto free_iomem;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF_UP, numvfs);
+ if (err)
+ goto free_iomem;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ mbox->pfvf = pf;
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
+ mbox++;
+ }
+
+ return 0;
+
+free_iomem:
+ if (hwbase)
+ iounmap(hwbase);
+free_wq:
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ return err;
+}
+
+static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox_pfvf[0];
+
+ if (!mbox)
+ return;
+
+ if (pf->mbox_pfvf_wq) {
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ pf->mbox_pfvf_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap(mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+}
+
+static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, pf);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, pf);
+ }
+}
+
+static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int err;
+
+ /* Register MBOX0 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
+ otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return err;
+ }
+
+ if (numvfs > 64) {
+ /* Register MBOX1 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
+ err = request_irq(pci_irq_vector(pf->pdev,
+ RVU_PF_INT_VEC_VFPF_MBOX1),
+ otx2_pfvf_mbox_intr_handler,
+ 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
+ return err;
+ }
+ }
+
+ otx2_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
+
static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
struct mbox_msghdr *msg)
{
+ int devid;
+
if (msg->id >= MBOX_MSG_MAX) {
dev_err(pf->dev,
"Mbox msg with unknown ID 0x%x\n", msg->id);
@@ -131,6 +729,26 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
return;
}
+ /* Message response headed for a VF */
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ if (devid) {
+ struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
+ struct delayed_work *dwork;
+
+ switch (msg->id) {
+ case MBOX_MSG_NIX_LF_START_RX:
+ config->intf_down = false;
+ dwork = &config->link_event_work;
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ break;
+ case MBOX_MSG_NIX_LF_STOP_RX:
+ config->intf_down = true;
+ break;
+ }
+
+ return;
+ }
+
switch (msg->id) {
case MBOX_MSG_READY:
pf->pcifunc = msg->pcifunc;
@@ -148,6 +766,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
mbox_handler_nix_txsch_alloc(pf,
(struct nix_txsch_alloc_rsp *)msg);
break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ break;
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
@@ -209,9 +830,22 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
{
+ int i;
+
/* Copy the link info sent by AF */
pf->linfo = msg->link_info;
+ /* notify VFs about link event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->link_event_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+
/* interface has not been fully configured yet */
if (pf->flags & OTX2_FLAG_INTF_DOWN)
return 0;
@@ -283,6 +917,12 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
+ if (devid) {
+ otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
+ MBOX_DIR_PFVF_UP, devid - 1,
+ af_mbox->up_num_msgs);
+ return;
+ }
otx2_mbox_msg_send(mbox, 0);
}
@@ -359,7 +999,6 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
struct mbox *mbox = &pf->mbox;
if (pf->mbox_wq) {
- flush_workqueue(pf->mbox_wq);
destroy_workqueue(pf->mbox_wq);
pf->mbox_wq = NULL;
}
@@ -412,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
- otx2_mbox_lock_init(&pf->mbox);
+ mutex_init(&mbox->lock);
return 0;
exit:
@@ -425,19 +1064,19 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
else
msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return err;
}
@@ -446,19 +1085,19 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
else
msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return err;
}
@@ -480,6 +1119,7 @@ int otx2_set_real_num_queues(struct net_device *netdev,
"Failed to set no of Rx queues: %d\n", rx_queues);
return err;
}
+EXPORT_SYMBOL(otx2_set_real_num_queues);
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
@@ -643,7 +1283,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
/* Get the size of receive buffers to allocate */
pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* NPA init */
err = otx2_config_npa(pf);
if (err)
@@ -654,38 +1294,41 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Enable backpressure */
+ otx2_nix_config_bp(pf, true);
+
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_nix_lf;
}
/* Init Auras and pools used by NIX SQ, for queueing SQEs */
err = otx2_sq_aura_pool_init(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_rq_ptrs;
}
err = otx2_txsch_alloc(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_sq_ptrs;
}
err = otx2_config_nix_queues(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
err = otx2_txschq_config(pf, lvl);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
err_free_nix_queues:
@@ -703,7 +1346,7 @@ err_free_rq_ptrs:
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
err_free_nix_lf:
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
@@ -717,7 +1360,7 @@ err_free_npa_lf:
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
exit:
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
@@ -737,6 +1380,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+ mutex_lock(&mbox->lock);
+ /* Disable backpressure */
+ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_nix_config_bp(pf, false);
+ mutex_unlock(&mbox->lock);
+
/* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
@@ -756,28 +1405,28 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Reset NIX LF */
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
/* Disable NPA Pool and Aura hw context */
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
}
int otx2_open(struct net_device *netdev)
@@ -906,6 +1555,9 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
+ /* Restore pause frame settings */
+ otx2_config_pause_frm(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_free_cints;
@@ -929,6 +1581,7 @@ err_free_mem:
kfree(qset->napi);
return err;
}
+EXPORT_SYMBOL(otx2_open);
int otx2_stop(struct net_device *netdev)
{
@@ -989,6 +1642,7 @@ int otx2_stop(struct net_device *netdev)
sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
return 0;
}
+EXPORT_SYMBOL(otx2_stop);
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
@@ -1025,15 +1679,23 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
+
+ queue_work(pf->otx2_wq, &pf->rx_mode_work);
+}
+
+static void otx2_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
if (!(netdev->flags & IFF_UP))
return;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -1046,7 +1708,7 @@ static void otx2_set_rx_mode(struct net_device *netdev)
req->mode |= NIX_RX_MODE_ALLMULTI;
otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1086,6 +1748,17 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
};
+static int otx2_wq_init(struct otx2_nic *pf)
+{
+ pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
+ if (!pf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+ return 0;
+}
+
static int otx2_check_pf_usable(struct otx2_nic *nic)
{
u64 rev;
@@ -1117,7 +1790,6 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
otx2_disable_mbox_intr(pf);
pci_free_irq_vectors(hw->pdev);
- pci_free_irq_vectors(hw->pdev);
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
@@ -1172,6 +1844,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pf->netdev = netdev;
pf->pdev = pdev;
pf->dev = dev;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
pf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &pf->hw;
@@ -1270,21 +1943,29 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = OTX2_MAX_MTU;
- INIT_WORK(&pf->reset_task, otx2_reset_task);
-
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_detach_rsrc;
}
+ err = otx2_wq_init(pf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2_set_ethtool_ops(netdev);
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ /* Enable pause frames by default */
+ pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+
return 0;
+err_unreg_netdev:
+ unregister_netdev(netdev);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -1301,6 +1982,121 @@ err_release_regions:
return err;
}
+static void otx2_vf_link_event_task(struct work_struct *work)
+{
+ struct otx2_vf_config *config;
+ struct cgx_link_info_msg *req;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ link_event_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_link_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
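
The task above pushes the PF's cached link state (pf->linfo) to one VF over the PF => VF "up" mailbox; the matching consumer is otx2vf_process_mbox_msg_up() in the new otx2_vf.c below. A hedged sketch, not taken from this patch, of how the PF side might fan a CGX link event out to all active VFs using the per-VF state initialized in otx2_sriov_enable():

static void otx2_fan_out_link_event(struct otx2_nic *pf)
{
	int i;

	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
		/* VFs whose interface is down will pick the link state
		 * up themselves when they open the device.
		 */
		if (pf->vf_configs[i].intf_down)
			continue;
		schedule_delayed_work(&pf->vf_configs[i].link_event_work,
				      msecs_to_jiffies(100));
	}
}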
+
+static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret, i;
+
+ /* Init PF <=> VF mailbox stuff */
+ ret = otx2_pfvf_mbox_init(pf, numvfs);
+ if (ret)
+ return ret;
+
+ ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
+ if (ret)
+ goto free_mbox;
+
+ pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs) {
+ ret = -ENOMEM;
+ goto free_intr;
+ }
+
+ for (i = 0; i < numvfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ ret = otx2_pf_flr_init(pf, numvfs);
+ if (ret)
+ goto free_configs;
+
+ ret = otx2_register_flr_me_intr(pf, numvfs);
+ if (ret)
+ goto free_flr;
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret)
+ goto free_flr_intr;
+
+ return numvfs;
+free_flr_intr:
+ otx2_disable_flr_me_intr(pf);
+free_flr:
+ otx2_flr_wq_destroy(pf);
+free_configs:
+ kfree(pf->vf_configs);
+free_intr:
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+free_mbox:
+ otx2_pfvf_mbox_destroy(pf);
+ return ret;
+}
+
+static int otx2_sriov_disable(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int numvfs = pci_num_vf(pdev);
+ int i;
+
+ if (!numvfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+
+ for (i = 0; i < pci_num_vf(pdev); i++)
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ kfree(pf->vf_configs);
+
+ otx2_disable_flr_me_intr(pf);
+ otx2_flr_wq_destroy(pf);
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+ otx2_pfvf_mbox_destroy(pf);
+
+ return 0;
+}
+
+static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ if (numvfs == 0)
+ return otx2_sriov_disable(pdev);
+ else
+ return otx2_sriov_enable(pdev, numvfs);
+}
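
With .sriov_configure wired up below, the VF lifecycle is driven entirely from sysfs: writing a count to /sys/bus/pci/devices/<BDF>/sriov_numvfs (e.g. echo 4) takes the otx2_sriov_enable() path above, and writing 0 tears everything down via otx2_sriov_disable().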
+
static void otx2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1315,6 +2111,10 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_cgx_config_linkevents(pf, false);
unregister_netdev(netdev);
+ otx2_sriov_disable(pf->pdev);
+ if (pf->otx2_wq)
+ destroy_workqueue(pf->otx2_wq);
+
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
@@ -1331,6 +2131,7 @@ static struct pci_driver otx2_pf_driver = {
.probe = otx2_probe,
.shutdown = otx2_remove,
.remove = otx2_remove,
+ .sriov_configure = otx2_sriov_configure
};
static int __init otx2_rvupf_init_module(void)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 7963d418886a..867f646e0802 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -45,6 +45,19 @@
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3)
+#define RVU_VF_INT (0x20)
+#define RVU_VF_INT_W1S (0x28)
+#define RVU_VF_INT_ENA_W1S (0x30)
+#define RVU_VF_INT_ENA_W1C (0x38)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
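
The (a) << 3 and (a) << 4 encodings above stride per-index CSR arrays by 8 or 16 bytes; a couple of worked values, following directly from the macros:

	RVU_VF_VFPF_MBOXX(1)	/* 0x00 | (1 << 3) == 0x08, i.e. RVU_VF_VFPF_MBOX1 */
	RVU_VF_MSIX_VECX_CTL(2)	/* 0x008 | (2 << 4) == 0x28 */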
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index bef4c20fe314..45abe0cd0e7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf,
skb_set_hash(skb, hash, hash_type);
}
+static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
+ int qidx)
+{
+ struct nix_rx_sg_s *sg = &cqe->sg;
+ void *end, *start;
+ u64 *seg_addr;
+ int seg;
+
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++)
+ otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ start += sizeof(*sg);
+ }
+}
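
otx2_free_rcv_seg() walks the CQE's scatter list: parse.desc_sizem1 gives the descriptor size in 16-byte units minus one, each struct nix_rx_sg_s subdescriptor carries a header plus up to three segment pointers (sg->segs of them), and the low three bits of each pointer are masked off because they are not part of the buffer address. Illustrative mask arithmetic, with a made-up pointer value:

	u64 raw  = 0x0000000012345678ULL | 0x5;	/* hypothetical low flag bits */
	u64 iova = raw & ~0x07ULL;		/* 0x12345678, returned to the aura */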
+
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe, int qidx)
{
@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- return false;
+ if (cqe->sg.segs == 1)
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
return false;
/* Free buffer back to pool */
if (cqe->sg.segs)
- otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
+ otx2_free_rcv_seg(pfvf, cqe, qidx);
return true;
}
@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct sk_buff *skb = NULL;
- if (unlikely(parse->errlev || parse->errcode)) {
+ if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
@@ -284,6 +304,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+ otx2_get_page(cq->rbpool);
return processed_cqe;
}
@@ -778,6 +799,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true;
}
+EXPORT_SYMBOL(otx2_sq_append_skb);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
@@ -788,11 +810,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
if (!cqe->sg.subdc)
continue;
+ processed_cqe++;
+ if (cqe->sg.segs > 1) {
+ otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
+ continue;
+ }
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
put_page(virt_to_page(phys_to_virt(pa)));
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -831,18 +857,18 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
else
msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
new file mode 100644
index 000000000000..187c633a7af5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+
+#define DRV_NAME "octeontx2-nicvf"
+#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+
+static const struct pci_device_id otx2_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { }
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
+
+/* RVU VF Interrupt Vector Enumeration */
+enum {
+ RVU_VF_INT_VEC_MBOX = 0x0,
+};
+
+static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(vf->dev,
+ "Mbox msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(vf->dev,
+ "Mbox msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ if (msg->rc == MBOX_MSG_INVALID) {
+ dev_err(vf->dev,
+ "PF/AF says the sent msg(s) %d were invalid\n",
+ msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ vf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_TXSCH_ALLOC:
+ mbox_handler_nix_txsch_alloc(vf,
+ (struct nix_txsch_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(vf->dev,
+ "Mbox msg response has err %d, ID %d\n",
+ msg->rc, msg->id);
+ }
+}
+
+static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (af_mbox->num_msgs == 0)
+ return;
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
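
The loop above walks a simple chained layout in the shared mailbox region; roughly (offsets relative to mbox->rx_start):

/*
 *  +----------------------+  rx_start
 *  | struct mbox_hdr      |  num_msgs, ...
 *  +----------------------+  + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN)
 *  | struct mbox_msghdr   |  id, sig, rc, next_msgoff --+
 *  | message payload      |                             |
 *  +----------------------+  rx_start + next_msgoff <---+
 *  | struct mbox_msghdr   |  ...
 *  +----------------------+
 */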
+
+static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
+ struct mbox_msghdr *req)
+{
+ struct msg_rsp *rsp;
+ int err;
+
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
+ &vf->mbox.mbox_up, 0,
+ sizeof(struct msg_rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+ err = otx2_mbox_up_handler_cgx_link_event(
+ vf, (struct cgx_link_info_msg *)req, rsp);
+ return err;
+ default:
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *vf;
+ int offset, id;
+
+ vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ vf = vf_mbox->pfvf;
+ mbox = &vf_mbox->mbox_up;
+ mdev = &mbox->dev[0];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (vf_mbox->up_num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_mbox_msg_up(vf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ }
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ }
+
+ return IRQ_HANDLED;
+}
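
Two details in the handler above are easy to miss: the smp_rmb() keeps the reads of the shared region from being ordered before the interrupt that announced them, and hdr->num_msgs is latched into the driver-private mbox state and cleared in shared memory before the work is queued, so a re-raised interrupt cannot make the worker process the same batch twice.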
+
+static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
+{
+ int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);
+
+ /* Disable VF => PF mailbox IRQ */
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, vf);
+}
+
+static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ if (err) {
+ dev_err(vf->dev,
+ "RVUPF: IRQ registration failed for VFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from PF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_pf)
+ return 0;
+
+ /* Check mailbox communication with PF */
+ req = otx2_mbox_alloc_msg_ready(&vf->mbox);
+ if (!req) {
+ otx2vf_disable_mbox_intr(vf);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&vf->mbox);
+ if (err) {
+ dev_warn(vf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2vf_disable_mbox_intr(vf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
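
When probe_pf is set, this function doubles as a liveness check: a READY message is sent synchronously, and if the PF/AF side does not answer, probing is deferred with -EPROBE_DEFER so the driver core retries once the admin function has come up. The MSI-X realloc path later calls this with probe_pf = false to skip the handshake.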
+
+static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+
+ if (vf->mbox_wq) {
+ flush_workqueue(vf->mbox_wq);
+ destroy_workqueue(vf->mbox_wq);
+ vf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = vf;
+ vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!vf->mbox_wq)
+ return -ENOMEM;
+
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * the admin function (i.e. PF0) and this VF, and shouldn't be mapped
+ * as device memory, so that unaligned accesses are allowed.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, vf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
+ mutex_init(&mbox->lock);
+
+ return 0;
+exit:
+ destroy_workqueue(vf->mbox_wq);
+ return err;
+}
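
As the comment notes, the shared mailbox lives in ordinary RAM behind PCI_MBOX_BAR_NUM, so it is mapped write-combining with ioremap_wc() rather than as device memory; a Device-type mapping would trap the unaligned accesses the message parsing performs.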
+
+static int otx2vf_open(struct net_device *netdev)
+{
+ struct otx2_nic *vf;
+ int err;
+
+ err = otx2_open(netdev);
+ if (err)
+ return err;
+
+ /* LBKs do not receive link events, so tell everyone we are up here */
+ vf = netdev_priv(netdev);
+ if (is_otx2_lbkvf(vf->pdev)) {
+ pr_info("%s NIC Link is UP\n", netdev->name);
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ }
+
+ return 0;
+}
+
+static int otx2vf_stop(struct net_device *netdev)
+{
+ return otx2_stop(netdev);
+}
+
+static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ sq = &vf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
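
The stop/recheck/wake sequence above is the usual guard against a lost wakeup: the queue is stopped only after an append fails, smp_mb() orders the stop against re-reading the aura fill counter, and if completions freed SQB space in between, the queue is woken immediately rather than stalling until the next completion. A worked instance of the wake test, with made-up values:

	/* Hypothetical: 16 SQBs of 8 SQEs each, *aura_fc_addr reads 14. */
	free_sqe = (16 - 14) * 8;	/* == 16; woken if 16 > sq->sqe_thresh */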
+
+static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2vf_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2vf_open(netdev);
+
+ return err;
+}
+
+static void otx2vf_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
+
+ rtnl_lock();
+
+ if (netif_running(vf->netdev)) {
+ otx2vf_stop(vf->netdev);
+ vf->reset_count++;
+ otx2vf_open(vf->netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static const struct net_device_ops otx2vf_netdev_ops = {
+ .ndo_open = otx2vf_open,
+ .ndo_stop = otx2vf_stop,
+ .ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_get_stats64 = otx2_get_stats64,
+ .ndo_tx_timeout = otx2_tx_timeout,
+};
+
+static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ int num_vec, err;
+
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2vf_disable_mbox_intr(vf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2vf_register_mbox_intr(vf, false);
+}
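
This mirrors otx2_realloc_msix_vectors() on the PF side: the final vector count is unknown until the AF's MSIX_OFFSET response fills in hw->nix_msixoff, so probe first allocates whatever pci_msix_vec_count() reports just to operate the mailbox, then frees and re-allocates exactly num_vec vectors and re-registers the mailbox IRQ (with probe_pf = false, skipping the READY handshake).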
+
+static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int num_vec = pci_msix_vec_count(pdev);
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct otx2_nic *vf;
+ struct otx2_hw *hw;
+ int err, qcount;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ qcount = num_online_cpus();
+ netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ vf = netdev_priv(netdev);
+ vf->netdev = netdev;
+ vf->pdev = pdev;
+ vf->dev = dev;
+ vf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ vf->flags |= OTX2_FLAG_INTF_DOWN;
+ hw = &vf->hw;
+ hw->pdev = vf->pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!vf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ /* Init VF <=> PF mailbox stuff */
+ err = otx2vf_vfaf_mbox_init(vf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2vf_register_mbox_intr(vf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this VF */
+ err = otx2_attach_npa_nix(vf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2vf_realloc_msix_vectors(vf);
+ if (err)
+ goto err_mbox_destroy;
+
+ err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ if (err)
+ goto err_detach_rsrc;
+
+ otx2_setup_dev_hw_settings(vf);
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features = netdev->hw_features;
+
+ netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2vf_netdev_ops;
+
+ /* MTU range: 68 - 9190 */
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = OTX2_MAX_MTU;
+
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+
+ /* To distinguish, for LBK VFs set netdev name explicitly */
+ if (is_otx2_lbkvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_detach_rsrc;
+ }
+
+ otx2vf_set_ethtool_ops(netdev);
+
+ /* Enable pause frames by default */
+ vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return 0;
+
+err_detach_rsrc:
+ otx2_detach_resources(&vf->mbox);
+err_disable_mbox_intr:
+ otx2vf_disable_mbox_intr(vf);
+err_mbox_destroy:
+ otx2vf_vfaf_mbox_destroy(vf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2vf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *vf;
+
+ if (!netdev)
+ return;
+
+ vf = netdev_priv(netdev);
+
+ otx2vf_disable_mbox_intr(vf);
+
+ otx2_detach_resources(&vf->mbox);
+ otx2vf_vfaf_mbox_destroy(vf);
+ pci_free_irq_vectors(vf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2vf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_vf_id_table,
+ .probe = otx2vf_probe,
+ .remove = otx2vf_remove,
+ .shutdown = otx2vf_remove,
+};
+
+static int __init otx2vf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2vf_driver);
+}
+
+static void __exit otx2vf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2vf_driver);
+}
+
+module_init(otx2vf_init_module);
+module_exit(otx2vf_cleanup_module);