Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 169
1 file changed, 144 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c190c3ce898..557e4292c846 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -21,7 +21,6 @@
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-#define DRV_VERSION "1.0"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -46,10 +45,9 @@ static const struct pci_device_id rvu_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);
static char *mkex_profile; /* MKEX profile name */
@@ -88,13 +86,15 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
- while (time_before(jiffies, timeout)) {
- reg_val = readq(reg);
- if (zero && !(reg_val & mask))
- return 0;
- if (!zero && (reg_val & mask))
- return 0;
+again:
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ if (time_before(jiffies, timeout)) {
usleep_range(1, 5);
+ goto again;
}
return -EBUSY;
}
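
[note] The loop is restructured so the register is sampled at least once before the deadline is checked: with the old while () form, a caller preempted past the timeout before the first iteration would return -EBUSY without ever reading the register. A minimal standalone sketch of the same sample-then-check-deadline pattern (the helper name is illustrative, not from the driver):

static int poll_reg_sample_first(void __iomem *reg, u64 mask,
				 bool zero, unsigned long deadline)
{
	u64 val;

again:
	val = readq(reg);		/* always read at least once */
	if (zero ? !(val & mask) : (val & mask))
		return 0;
	if (time_before(jiffies, deadline)) {
		usleep_range(1, 5);	/* back off briefly, then retry */
		goto again;
	}
	return -EBUSY;			/* deadline passed, bit never matched */
}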
@@ -421,6 +421,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
+static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
+}
+
+static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+}
+
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
@@ -603,7 +616,11 @@ setup_vfmsix:
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF;
- phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+
iova = dma_map_resource(rvu->dev, phy_addr,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
@@ -613,10 +630,18 @@ setup_vfmsix:
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
rvu->msix_base_iova = iova;
+ rvu->msixtr_base_phy = phy_addr;
return 0;
}
+static void rvu_reset_msix(struct rvu *rvu)
+{
+ /* Restore msixtr base register */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
+ rvu->msixtr_base_phy);
+}
+
static void rvu_free_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
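
[note] RVU_AF_MSIXTR_BASE is reprogrammed with an IOVA during setup, so the new msixtr_base_phy field remembers the original physical address and rvu_reset_msix() writes it back on teardown; otherwise the hardware would be left pointing at an address with no IOMMU mapping behind it, which matters for driver reload or kexec. The pairing, schematically (size stands in for max_msix * PCI_MSIX_ENTRY_SIZE):

/* setup: remap the MSI-X table through the IOMMU, keep the
 * physical base around for the later restore
 */
iova = dma_map_resource(rvu->dev, phy_addr, size,
			DMA_BIDIRECTIONAL, 0);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
rvu->msixtr_base_phy = phy_addr;

/* teardown: drop the mapping, then point the hardware back at
 * the physical base
 */
dma_unmap_resource(rvu->dev, rvu->msix_base_iova, size,
		   DMA_BIDIRECTIONAL, 0);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
	    rvu->msixtr_base_phy);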
@@ -655,9 +680,80 @@ static void rvu_free_hw_resources(struct rvu *rvu)
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+ rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
}
+static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct rvu_pfvf *pfvf;
+ u64 *mac;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ /* Assign MAC address to PF */
+ pfvf = &rvu->pf[pf];
+ if (rvu->fwdata && pf < PF_MACNUM_MAX) {
+ mac = &rvu->fwdata->pf_macs[pf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+
+ /* Assign MAC address to VFs */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pfvf = &rvu->hwvf[hwvf];
+ if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
+ mac = &rvu->fwdata->vf_macs[hwvf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ }
+ }
+}
+
+static int rvu_fwdata_init(struct rvu *rvu)
+{
+ u64 fwdbase;
+ int err;
+
+ /* Get firmware data base address */
+ err = cgx_get_fwdata_base(&fwdbase);
+ if (err)
+ goto fail;
+ rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
+ if (!rvu->fwdata)
+ goto fail;
+ if (!is_rvu_fwdata_valid(rvu)) {
+ dev_err(rvu->dev,
+ "Mismatch in 'fwdata' struct btw kernel and firmware\n");
+ iounmap(rvu->fwdata);
+ rvu->fwdata = NULL;
+ return -EINVAL;
+ }
+ return 0;
+fail:
+ dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
+ return -EIO;
+}
+
+static void rvu_fwdata_exit(struct rvu *rvu)
+{
+ if (rvu->fwdata)
+ iounmap(rvu->fwdata);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
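
[note] rvu_setup_pfvf_macaddress() assumes firmware hands over each MAC packed into the low 48 bits of a u64 (the pf_macs[]/vf_macs[] arrays in struct rvu_fwdata), falling back to eth_random_addr() when the entry is zero or the function index is out of range. For reference, u64_to_ether_addr() from <linux/etherdevice.h> unpacks that layout roughly like this (simplified sketch):

static inline void u64_to_ether_addr(u64 u, u8 *addr)
{
	int i;

	/* most significant MAC byte comes from the highest of
	 * the low 48 bits
	 */
	for (i = ETH_ALEN - 1; i >= 0; i--) {
		addr[i] = u & 0xff;
		u >>= 8;
	}
}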
@@ -813,6 +909,8 @@ init:
mutex_init(&rvu->rsrc_lock);
+ rvu_fwdata_init(rvu);
+
err = rvu_setup_msix_resources(rvu);
if (err)
return err;
@@ -825,8 +923,10 @@ init:
/* Allocate memory for block LF/slot to pcifunc mapping info */
block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
sizeof(u16), GFP_KERNEL);
- if (!block->fn_map)
- return -ENOMEM;
+ if (!block->fn_map) {
+ err = -ENOMEM;
+ goto msix_err;
+ }
/* Scan all blocks to check if low level firmware has
 * already provisioned any of the resources to a PF/VF.
 */
@@ -836,25 +936,36 @@ init:
err = rvu_npc_init(rvu);
if (err)
- goto exit;
+ goto npc_err;
err = rvu_cgx_init(rvu);
if (err)
- goto exit;
+ goto cgx_err;
+
+ /* Assign MACs for CGX mapped functions */
+ rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
if (err)
- goto cgx_err;
+ goto npa_err;
err = rvu_nix_init(rvu);
if (err)
- goto cgx_err;
+ goto nix_err;
return 0;
+nix_err:
+ rvu_nix_freemem(rvu);
+npa_err:
+ rvu_npa_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
-exit:
+npc_err:
+ rvu_npc_freemem(rvu);
+ rvu_fwdata_exit(rvu);
+msix_err:
+ rvu_reset_msix(rvu);
return err;
}
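
[note] The reworked error path follows the usual kernel unwind-ladder idiom: labels are ordered so that a failure at step N jumps to a label that frees steps N-1 down to 1 plus the shared state (the fwdata mapping and MSI-X base), instead of the old single exit label that skipped the later cleanups. Schematically, with hypothetical step names:

	err = init_a(rvu);
	if (err)
		goto a_err;
	err = init_b(rvu);	/* fails -> unwind a, then shared state */
	if (err)
		goto b_err;
	return 0;

b_err:
	free_a(rvu);
a_err:
	free_shared(rvu);
	return err;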
@@ -901,6 +1012,10 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
+ if (rvu->fwdata) {
+ rsp->rclk_freq = rvu->fwdata->rclk;
+ rsp->sclk_freq = rvu->fwdata->sclk;
+ }
return 0;
}
@@ -2128,6 +2243,9 @@ static int rvu_register_interrupts(struct rvu *rvu)
}
rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+ /* Clear TRPEND bit for all PF */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
/* Enable ME interrupt for all PFs*/
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
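
[note] Writing RVU_AF_PFTRPEND (a write-one-to-clear register) before enabling the ME interrupt appears intended to clear any transaction-pending bits left over from before the AF driver loaded, so they are not serviced as fresh events. INTR_MASK() is the driver's helper for building a one-bit-per-PF mask; a hypothetical equivalent:

/* one bit per function, bits [0, n); saturates at 64 functions */
#define INTR_MASK(n)	(((n) < 64) ? (BIT_ULL(n) - 1) : ~0ULL)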
@@ -2439,17 +2557,13 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
- dev_err(dev, "Unable to set DMA mask\n");
+ dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to set consistent DMA mask\n");
- goto err_release_regions;
- }
+ pci_set_master(pdev);
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
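
[note] dma_set_mask_and_coherent() configures the streaming and coherent DMA masks in one call, replacing the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair, and pci_set_master() is added so the device may initiate DMA at all. The helper is roughly equivalent to this sketch of the core logic in <linux/dma-mapping.h>:

static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	/* only set the coherent mask if the streaming mask fit */
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}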
@@ -2489,6 +2603,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_flr;
+ rvu_setup_rvum_blk_revid(rvu);
+
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
if (err)
@@ -2506,8 +2622,10 @@ err_mbox:
rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2527,11 +2645,12 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
-
+ rvu_clear_rvum_blk_revid(rvu);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);