Diffstat (limited to 'drivers/vdpa/octeon_ep/octep_vdpa_main.c')
-rw-r--r--  drivers/vdpa/octeon_ep/octep_vdpa_main.c | 123
1 file changed, 83 insertions(+), 40 deletions(-)
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_main.c b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
index cd55b1aac151..31a02e7fd7f2 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_main.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
@@ -49,58 +49,89 @@ static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
struct octep_hw *oct_hw = data;
int i;
- for (i = 0; i < oct_hw->nr_vring; i++) {
- if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
- /* Acknowledge the per queue notification to the device */
- iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
- oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+ /* Each device has multiple interrupts (nb_irqs) shared among rings
+ * (nr_vring). Device interrupts are mapped to the rings in a
+ * round-robin fashion.
+ *
+ * For example, if nb_irqs = 8 and nr_vring = 64:
+ * 0 -> 0, 8, 16, 24, 32, 40, 48, 56;
+ * 1 -> 1, 9, 17, 25, 33, 41, 49, 57;
+ * ...
+ * 7 -> 7, 15, 23, 31, 39, 47, 55, 63;
+ */
+
+ for (i = irq - oct_hw->irqs[0]; i < oct_hw->nr_vring; i += oct_hw->nb_irqs) {
+ if (ioread8(oct_hw->vqs[i].cb_notify_addr)) {
+ /* Acknowledge the per ring notification to the device */
+ iowrite8(0, oct_hw->vqs[i].cb_notify_addr);
+
+ if (likely(oct_hw->vqs[i].cb.callback))
+ oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+ break;
}
}
+ /* Check for config interrupt. Config uses the first interrupt */
+ if (unlikely(irq == oct_hw->irqs[0] && ioread8(oct_hw->isr))) {
+ iowrite8(0, oct_hw->isr);
+
+ if (oct_hw->config_cb.callback)
+ oct_hw->config_cb.callback(oct_hw->config_cb.private);
+ }
+
return IRQ_HANDLED;
}
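
The stride in the loop above implements the round-robin mapping from the new comment: vector k services rings k, k + nb_irqs, k + 2*nb_irqs, and so on. A minimal standalone C sketch (illustrative hard-coded values, nothing read from hardware) that prints the mapping:

#include <stdio.h>

int main(void)
{
	const int nb_irqs = 8, nr_vring = 64;	/* example values from the comment */
	int vec, ring;

	for (vec = 0; vec < nb_irqs; vec++) {
		printf("vector %d -> rings:", vec);
		/* same stride as the handler: start at the vector index,
		 * advance by the number of interrupts */
		for (ring = vec; ring < nr_vring; ring += nb_irqs)
			printf(" %d", ring);
		printf("\n");
	}
	return 0;
}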
static void octep_free_irqs(struct octep_hw *oct_hw)
{
struct pci_dev *pdev = oct_hw->pdev;
+ int irq;
+
+ if (!oct_hw->irqs)
+ return;
- if (oct_hw->irq != -1) {
- devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
- oct_hw->irq = -1;
+ for (irq = 0; irq < oct_hw->nb_irqs; irq++) {
+ if (!oct_hw->irqs[irq])
+ break;
+
+ devm_free_irq(&pdev->dev, oct_hw->irqs[irq], oct_hw);
}
+
pci_free_irq_vectors(pdev);
+ devm_kfree(&pdev->dev, oct_hw->irqs);
+ oct_hw->irqs = NULL;
}
static int octep_request_irqs(struct octep_hw *oct_hw)
{
struct pci_dev *pdev = oct_hw->pdev;
- int ret, irq;
+ int ret, irq, idx;
- /* Currently HW device provisions one IRQ per VF, hence
- * allocate one IRQ for all virtqueues call interface.
- */
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ oct_hw->irqs = devm_kcalloc(&pdev->dev, oct_hw->nb_irqs, sizeof(int), GFP_KERNEL);
+ if (!oct_hw->irqs)
+ return -ENOMEM;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, oct_hw->nb_irqs, PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to alloc msix vector");
return ret;
}
- snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name),
- OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev));
-
- irq = pci_irq_vector(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
- oct_hw->vqs->msix_name, oct_hw);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register interrupt handler\n");
- goto free_irq_vec;
+ for (idx = 0; idx < oct_hw->nb_irqs; idx++) {
+ irq = pci_irq_vector(pdev, idx);
+ ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
+ dev_name(&pdev->dev), oct_hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register interrupt handler\n");
+ goto free_irqs;
+ }
+ oct_hw->irqs[idx] = irq;
}
- oct_hw->irq = irq;
return 0;
-free_irq_vec:
- pci_free_irq_vectors(pdev);
+free_irqs:
+ octep_free_irqs(oct_hw);
return ret;
}
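
Note the rollback contract between the two helpers: oct_hw->irqs comes from devm_kcalloc() and is therefore zero-filled, so after a partial request failure octep_free_irqs() can stop at the first zero slot. A condensed sketch of that contract, with hypothetical sketch_ names (kernel-style fragment, assumptions noted in comments):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical helper mirroring the stop-at-zero rollback above; it
 * assumes irqs[] was zero-filled by devm_kcalloc(), so entries past
 * the last successful devm_request_irq() are still 0. */
static void sketch_free_irqs(struct pci_dev *pdev, int *irqs, int nb_irqs,
			     void *data)
{
	int i;

	for (i = 0; i < nb_irqs; i++) {
		if (!irqs[i])	/* first unused slot; nothing beyond was requested */
			break;
		devm_free_irq(&pdev->dev, irqs[i], data);
	}
	pci_free_irq_vectors(pdev);
}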
@@ -271,7 +302,9 @@ static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
- return VIRTIO_ID_NET;
+ struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
+
+ return oct_hw->dev_id;
}
static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
@@ -421,6 +454,9 @@ static void octep_vdpa_remove_pf(struct pci_dev *pdev)
octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
octep_vdpa_pf_bar_expand(octpf);
+
+ /* The pf version does not use managed PCI. */
+ pci_disable_device(pdev);
}
static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
@@ -472,15 +508,15 @@ static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
u64 device_features;
int ret;
- oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops, 1, 1,
- NULL, false);
+ oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops,
+ NULL, 1, 1, NULL, false);
if (IS_ERR(oct_vdpa)) {
dev_err(&pdev->dev, "Failed to allocate vDPA structure for octep vdpa device");
return PTR_ERR(oct_vdpa);
}
oct_vdpa->pdev = pdev;
- oct_vdpa->vdpa.dma_dev = &pdev->dev;
+ oct_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
oct_vdpa->vdpa.mdev = mdev;
oct_vdpa->oct_hw = oct_hw;
vdpa_dev = &oct_vdpa->vdpa;
@@ -559,6 +595,7 @@ static void octep_vdpa_setup_task(struct work_struct *work)
struct device *dev = &pdev->dev;
struct octep_hw *oct_hw;
unsigned long timeout;
+ u64 val;
int ret;
oct_hw = &mgmt_dev->oct_hw;
@@ -590,6 +627,13 @@ static void octep_vdpa_setup_task(struct work_struct *work)
if (ret)
return;
+ val = readq(oct_hw->base[OCTEP_HW_MBOX_BAR] + OCTEP_VF_IN_CTRL(0));
+ oct_hw->nb_irqs = OCTEP_VF_IN_CTRL_RPVF(val);
+ if (!oct_hw->nb_irqs || oct_hw->nb_irqs > OCTEP_MAX_CB_INTR) {
+ dev_err(dev, "Invalid number of interrupts %d\n", oct_hw->nb_irqs);
+ goto unmap_region;
+ }
+
ret = octep_hw_caps_read(oct_hw, pdev);
if (ret < 0)
goto unmap_region;
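
OCTEP_VF_IN_CTRL_RPVF() is defined in the driver header, which this diff does not touch; for illustration only, a rings-per-VF field extraction of this kind is commonly written with FIELD_GET(). The mask below is an assumption, not the driver's actual register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Assumed field position, for illustration only; the real definition
 * is OCTEP_VF_IN_CTRL_RPVF() in the driver header. */
#define SKETCH_IN_CTRL_RPVF	GENMASK_ULL(35, 32)

static inline u32 sketch_rpvf(u64 val)
{
	return FIELD_GET(SKETCH_IN_CTRL_RPVF, val);
}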
@@ -692,6 +736,7 @@ static int octep_sriov_enable(struct pci_dev *pdev, int num_vfs)
octep_vdpa_assign_barspace(vf_pdev, pdev, index);
if (++index == num_vfs) {
done = true;
+ pci_dev_put(vf_pdev);
break;
}
}
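
The added pci_dev_put() balances the reference that pci_get_device() holds on the device it returns. The walk itself stays balanced, since each pci_get_device() call drops the reference on the device passed in, but leaving the loop early keeps the last reference alive. A sketch of the rule, with a hypothetical sketch_ helper:

#include <linux/pci.h>

/* Hypothetical walk illustrating the pci_get_device() refcount rule:
 * each call puts the 'from' argument and gets the returned device,
 * so an early break must drop the final reference by hand. */
static void sketch_walk_vfs(unsigned int vendor, unsigned int device)
{
	struct pci_dev *vf_pdev = NULL;

	while ((vf_pdev = pci_get_device(vendor, device, vf_pdev))) {
		if (vf_pdev->is_virtfn) {
			pci_dev_put(vf_pdev);	/* balance the held reference */
			break;
		}
	}
}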
@@ -768,12 +813,6 @@ static int octep_vdpa_pf_setup(struct octep_pf *octpf)
return -EINVAL;
}
- if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
- val &= ~GENMASK_ULL(35, 32);
- val |= BIT_ULL(32);
- writeq(val, addr + OCTEP_EPF_RINFO(0));
- }
-
len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);
octpf->vf_stride = len / totalvfs;
@@ -790,7 +829,7 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
struct octep_pf *octpf;
int ret;
- ret = pcim_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
dev_err(dev, "Failed to enable device\n");
return ret;
@@ -799,15 +838,17 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "No usable DMA configuration\n");
- return ret;
+ goto disable_pci;
}
octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
- if (!octpf)
- return -ENOMEM;
+ if (!octpf) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
if (ret)
- return ret;
+ goto disable_pci;
pci_set_master(pdev);
pci_set_drvdata(pdev, octpf);
@@ -821,6 +862,8 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
unmap_region:
octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
+disable_pci:
+ pci_disable_device(pdev);
return ret;
}
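
With pcim_enable_device() replaced by pci_enable_device(), nothing disables the function automatically on driver detach, so both the probe error ladder and octep_vdpa_remove_pf() must call pci_disable_device() themselves. A condensed sketch of the pairing, with hypothetical sketch_ names:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int sketch_probe_pf(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);	/* unmanaged: no automatic cleanup */
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto disable_pci;

	return 0;

disable_pci:
	pci_disable_device(pdev);	/* every error exit must disable */
	return ret;
}

static void sketch_remove_pf(struct pci_dev *pdev)
{
	pci_disable_device(pdev);	/* and so must remove */
}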