author		Alexey Kardashevskiy <aik@ozlabs.ru>	2016-04-29 18:55:23 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-05-11 21:54:31 +1000
commit		85674868cecebdf6eb7239ecf9c32b6273208d03
tree		ad73c4a0aadc9b2a212642a002344b46baf9397c
parent		b575c731fe58c1ecb1247c6a334ba6c7b4c0b480
powerpc/powernv/npu: Rework TCE Kill handling
The pnv_ioda_pe struct keeps an array of peers. At the moment it is only used to link GPU and NPU for two purposes:

1. Access the NPU quickly when configuring DMA for the GPU - this was addressed in the previous patch by removing use of it, as DMA setup is not something the kernel does constantly.

2. Invalidate the TCE cache for the NPU when it is invalidated for the GPU. The GPU and NPU are in different PEs. There is already a mechanism to attach multiple iommu_table_group structs to the same iommu_table (used for VFIO); this patch reuses it here.

This gets rid of the peers[] array and the PNV_IODA_PE_PEER flag as they are not needed anymore.

While we are here, add TCE cache invalidation after enabling bypass.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-By: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
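For context, the mechanism being reused works because the TCE invalidation path walks every iommu_table_group attached to an iommu_table via its it_group_list. Once the NPU's table_group is linked to the GPU's table, a TCE kill issued for the GPU's table reaches the NPU PE as well. A minimal sketch of that walk (the per-PE invalidate helper named here is hypothetical, not part of this patch):

	/*
	 * Sketch: for each table_group attached to this table, find the
	 * owning PE and kill its TCE cache. Linking the NPU's table_group
	 * to the GPU's iommu_table gets the NPU invalidated automatically.
	 */
	struct iommu_table_group_link *tgl;

	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);

		tce_invalidate_pe(pe);	/* hypothetical per-PE TCE kill */
	}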
Diffstat (limited to 'arch/powerpc/platforms/powernv/npu-dma.c')
-rw-r--r--	arch/powerpc/platforms/powernv/npu-dma.c	71
1 file changed, 20 insertions(+), 51 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 800d70f5d132..cb2d1dad38a8 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -136,22 +136,17 @@ static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
 	struct pnv_ioda_pe *pe;
 	struct pci_dn *pdn;
 
-	if (npe->flags & PNV_IODA_PE_PEER) {
-		pe = npe->peers[0];
-		pdev = pe->pdev;
-	} else {
-		pdev = pnv_pci_get_gpu_dev(npe->pdev);
-		if (!pdev)
-			return NULL;
-
-		pdn = pci_get_pdn(pdev);
-		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
-			return NULL;
+	pdev = pnv_pci_get_gpu_dev(npe->pdev);
+	if (!pdev)
+		return NULL;
 
-		hose = pci_bus_to_host(pdev->bus);
-		phb = hose->private_data;
-		pe = &phb->ioda.pe_array[pdn->pe_number];
-	}
+	pdn = pci_get_pdn(pdev);
+	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+		return NULL;
+
+	hose = pci_bus_to_host(pdev->bus);
+	phb = hose->private_data;
+	pe = &phb->ioda.pe_array[pdn->pe_number];
 
 	if (gpdev)
 		*gpdev = pdev;
@@ -186,6 +181,10 @@ static long pnv_npu_set_window(struct pnv_ioda_pe *npe,
 	}
 
 	pnv_pci_ioda2_tce_invalidate_entire(phb, false);
 
+	/* Add the table to the list so its TCE cache will get invalidated */
+	pnv_pci_link_table_and_group(phb->hose->node, 0,
+			tbl, &npe->table_group);
+
 	return 0;
 }
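The pnv_pci_link_table_and_group() call added above is what attaches the NPU PE's table_group to the GPU's table. Conceptually the helper allocates a link node and chains it onto the table's RCU-protected list, roughly like the following sketch (an assumption about its shape, not the exact kernel body):

	/* Sketch only: attach a table_group to an iommu_table so TCE
	 * invalidations on the table can find the group's PE. */
	static long link_table_and_group_sketch(int node, int num,
			struct iommu_table *tbl,
			struct iommu_table_group *table_group)
	{
		struct iommu_table_group_link *tgl;

		tgl = kzalloc_node(sizeof(*tgl), GFP_KERNEL, node);
		if (!tgl)
			return -ENOMEM;

		tgl->table_group = table_group;
		list_add_rcu(&tgl->next, &tbl->it_group_list);

		/* record the table in the group's window slot as well */
		table_group->tables[num] = tbl;

		return 0;
	}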
@@ -206,43 +205,10 @@ static long pnv_npu_unset_window(struct pnv_ioda_pe *npe)
 	}
 
 	pnv_pci_ioda2_tce_invalidate_entire(phb, false);
 
-	return 0;
-}
-
-void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
-{
-	struct pnv_ioda_pe *gpe;
-	struct pci_dev *gpdev;
-	int i, avail = -1;
-
-	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
-		return;
-
-	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
-	if (!gpe)
-		return;
-
-	for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
-		/* Nothing to do if the PE is already connected. */
-		if (gpe->peers[i] == npe)
-			return;
-
-		if (!gpe->peers[i])
-			avail = i;
-	}
+	pnv_pci_unlink_table_and_group(npe->table_group.tables[0],
+			&npe->table_group);
 
-	if (WARN_ON(avail < 0))
-		return;
-
-	gpe->peers[avail] = npe;
-	gpe->flags |= PNV_IODA_PE_PEER;
-
-	/*
-	 * We assume that the NPU devices only have a single peer PE
-	 * (the GPU PCIe device PE).
-	 */
-	npe->peers[0] = gpe;
-	npe->flags |= PNV_IODA_PE_PEER;
+	return 0;
 }
 
 /*
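Unlinking is the reverse operation: when the window is torn down, the NPU's table_group is removed from the table's group list so it is no longer visited on invalidation. A sketch of the idea (illustrative, not the exact body of pnv_pci_unlink_table_and_group):

	/* Sketch only: detach a table_group from an iommu_table. */
	static void unlink_table_and_group_sketch(struct iommu_table *tbl,
			struct iommu_table_group *table_group)
	{
		struct iommu_table_group_link *tgl;

		list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
			if (tgl->table_group != table_group)
				continue;

			list_del_rcu(&tgl->next);
			kfree_rcu(tgl, rcu);	/* assumes an rcu_head in the link */
			break;
		}
	}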
@@ -302,6 +268,9 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
 			npe->pe_number, npe->pe_number,
 			0 /* bypass base */, top);
 
+	if (rc == OPAL_SUCCESS)
+		pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+
 	return rc;
 }