author	Christoph Hellwig <hch@lst.de>	2020-11-06 19:19:38 +0100
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-11-17 15:22:07 -0400
commit	5a7a9e038b032137ae9c45d5429f18a2ffdf7d42
tree	39f0d285c032b578c125978129add2e57803e5ab /drivers/infiniband/core
parent	42f2611cc1738b201701e717246e11e86bef4e1e
RDMA/core: remove use of dma_virt_ops
Use the ib_dma_* helpers to skip the DMA translation instead. This removes
the last user of dma_virt_ops and keeps the weird layering violation inside
the RDMA core instead of burdening the DMA mapping subsystems with it. This
also means the software RDMA drivers now don't have to mess with DMA
parameters that are not relevant to them at all, and that in the future we
can use PCI P2P transfers even for software RDMA, as there is no first fake
layer of DMA mapping that the P2P DMA support would have to be hooked into.

Link: https://lore.kernel.org/r/20201106181941.1878556-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
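For context: the ib_dma_* helpers this commit relies on live in
include/rdma/ib_verbs.h, and after this series they bypass the DMA API
entirely whenever a device was registered without a DMA-capable device. A
minimal sketch of the two central helpers, assuming the in-tree definitions
(shown for orientation, not part of this diff):

	/* A software RDMA device is one registered without a dma_device. */
	static inline bool ib_uses_virt_dma(struct ib_device *dev)
	{
		return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
	}

	static inline u64 ib_dma_map_single(struct ib_device *dev,
					    void *cpu_addr, size_t size,
					    enum dma_data_direction direction)
	{
		/* No translation: the kernel virtual address is the "DMA" address. */
		if (ib_uses_virt_dma(dev))
			return (uintptr_t)cpu_addr;
		return dma_map_single(dev->dma_device, cpu_addr, size, direction);
	}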
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--	drivers/infiniband/core/device.c	43
-rw-r--r--	drivers/infiniband/core/rw.c	5
2 files changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ce26564d4edf..3ab1edea6acb 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1209,25 +1209,6 @@ out:
 	return ret;
 }
 
-static void setup_dma_device(struct ib_device *device,
-			     struct device *dma_device)
-{
-	/*
-	 * If the caller does not provide a DMA capable device then the IB
-	 * device will be used. In this case the caller should fully setup the
-	 * ibdev for DMA. This usually means using dma_virt_ops.
-	 */
-#ifdef CONFIG_DMA_VIRT_OPS
-	if (!dma_device) {
-		device->dev.dma_ops = &dma_virt_ops;
-		dma_device = &device->dev;
-	}
-#endif
-	WARN_ON(!dma_device);
-	device->dma_device = dma_device;
-	WARN_ON(!device->dma_device->dma_parms);
-}
-
 /*
  * setup_device() allocates memory and sets up data that requires calling the
  * device ops, this is the only reason these actions are not done during
@@ -1373,7 +1354,14 @@ int ib_register_device(struct ib_device *device, const char *name,
 	if (ret)
 		return ret;
 
-	setup_dma_device(device, dma_device);
+	/*
+	 * If the caller does not provide a DMA capable device then the IB core
+	 * will set up ib_sge and scatterlist structures that stash the kernel
+	 * virtual address into the address field.
+	 */
+	WARN_ON(dma_device && !dma_device->dma_parms);
+	device->dma_device = dma_device;
+
 	ret = setup_device(device);
 	if (ret)
 		return ret;
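With setup_dma_device() gone, a software verbs provider opts into virt-DMA
simply by registering without a DMA device. Roughly what a caller such as
rxe or siw looks like after this series (a sketch; the variable names are
illustrative, not copied from this diff):

	/* No dma_device: the IB core stashes kernel virtual addresses instead. */
	ret = ib_register_device(&rxe->ib_dev, ibdev_name, NULL);
	if (ret)
		return ret;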
@@ -2708,6 +2696,21 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 }
 EXPORT_SYMBOL(ib_set_device_ops);
 
+#ifdef CONFIG_INFINIBAND_VIRT_DMA
+int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		sg_dma_address(s) = (uintptr_t)sg_virt(s);
+		sg_dma_len(s) = s->length;
+	}
+	return nents;
+}
+EXPORT_SYMBOL(ib_dma_virt_map_sg);
+#endif /* CONFIG_INFINIBAND_VIRT_DMA */
+
 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
 	[RDMA_NL_LS_OP_RESOLVE] = {
 		.doit = ib_nl_handle_resolve_resp,
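Drivers never call ib_dma_virt_map_sg() directly; the ib_dma_map_sg()
wrapper in include/rdma/ib_verbs.h dispatches to it for virt-DMA devices
and to the real DMA API otherwise. A sketch for orientation (not part of
this diff):

	static inline int ib_dma_map_sg(struct ib_device *dev,
					struct scatterlist *sg, int nents,
					enum dma_data_direction direction)
	{
		if (ib_uses_virt_dma(dev))
			return ib_dma_virt_map_sg(dev, sg, nents);
		return dma_map_sg(dev->dma_device, sg, nents, direction);
	}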
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 13f43ab7220b..a96030b784eb 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -285,8 +285,11 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
 static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
 			  u32 sg_cnt, enum dma_data_direction dir)
 {
-	if (is_pci_p2pdma_page(sg_page(sg)))
+	if (is_pci_p2pdma_page(sg_page(sg))) {
+		if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
+			return 0;
 		return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+	}
 
 	return ib_dma_map_sg(dev, sg, sg_cnt, dir);
 }