author    Thierry Reding <treding@nvidia.com>    2020-02-04 14:59:25 +0100
committer Thierry Reding <treding@nvidia.com>    2020-02-06 18:21:55 +0100
commit    273da5a046965ccf0ec79eb63f2d5173467e20fa
tree      00894e2c40171bee6c9ad2bb0cd02bae0071bf29 /drivers/gpu/host1x
parent    2d9384ff91770a71bd1ff24c25952ef1187a0e9c
drm/tegra: Reuse IOVA mapping where possible
This partially reverts the DMA API support that was recently merged
because it was causing performance regressions on older Tegra devices.
Unfortunately, the cache maintenance performed by dma_map_sg() and
dma_unmap_sg() causes performance to drop by a factor of 10.

The right solution for this would be to cache mappings for buffers per
consumer device, but that's a bit involved. Instead, we simply revert to
the old behaviour of sharing IOVA mappings when we know that devices can
do so (i.e. they share the same IOMMU domain).

Cc: <stable@vger.kernel.org> # v5.5
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
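For illustration, a minimal sketch of the condition the revert relies on,
assuming nothing beyond the iommu API already used in the patch below (the
helper name can_share_iova is hypothetical, not from the patch): an IOVA
mapped through one device can only be reused by another device when both
are attached to the same IOMMU domain.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical helper, not part of the patch: an IOVA created for one
 * device is only valid for another device if both devices are attached
 * to the same IOMMU domain; otherwise each device translates the
 * address through a different set of page tables.
 */
static bool can_share_iova(struct device *a, struct device *b)
{
	struct iommu_domain *da = iommu_get_domain_for_dev(a);
	struct iommu_domain *db = iommu_get_domain_for_dev(b);

	/* No domain on either side means there is no IOVA to share. */
	return da && da == db;
}

The patch itself encodes this condition indirectly: as the comment in the
first hunk notes, client->group is used as the indicator that all host1x
clients were attached to a shared IOMMU domain, so no explicit domain
comparison is needed.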
Diffstat (limited to 'drivers/gpu/host1x')
-rw-r--r--  drivers/gpu/host1x/job.c  32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 25ca54de8fc5..0d53c08e9972 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
+#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -101,9 +102,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
struct host1x_client *client = job->client;
struct device *dev = client->dev;
+ struct iommu_domain *domain;
unsigned int i;
int err;
+ domain = iommu_get_domain_for_dev(dev);
job->num_unpins = 0;
for (i = 0; i < job->num_relocs; i++) {
@@ -117,7 +120,19 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- if (client->group)
+ /*
+ * If the client device is not attached to an IOMMU, the
+ * physical address of the buffer object can be used.
+ *
+ * Similarly, when an IOMMU domain is shared between all
+ * host1x clients, the IOVA is already available, so no
+ * need to map the buffer object again.
+ *
+ * XXX Note that this isn't always safe to do because it
+ * relies on an assumption that no cache maintenance is
+ * needed on the buffer objects.
+ */
+ if (!domain || client->group)
phys = &phys_addr;
else
phys = NULL;
@@ -176,6 +191,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
dma_addr_t phys_addr;
unsigned long shift;
struct iova *alloc;
+ dma_addr_t *phys;
unsigned int j;
g->bo = host1x_bo_get(g->bo);
@@ -184,7 +200,17 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+ /*
+ * If the host1x is not attached to an IOMMU, there is no need
+ * to map the buffer object for the host1x, since the physical
+ * address can simply be used.
+ */
+ if (!iommu_get_domain_for_dev(host->dev))
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(host->dev, g->bo, phys);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
goto unpin;
@@ -214,7 +240,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
job->unpins[job->num_unpins].size = gather_size;
phys_addr = iova_dma_addr(&host->iova, alloc);
- } else {
+ } else if (sgt) {
err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
DMA_TO_DEVICE);
if (!err) {
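The "if (!err)" check at the end of this hunk relies on dma_map_sg()'s
return convention: it returns the number of segments actually mapped, and
0 on failure, never a negative value. A minimal standalone sketch of that
convention follows; the helper name host1x_map_gather is hypothetical and
not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper, not from the patch: map a pinned scatterlist for
 * DMA to the device and convert dma_map_sg()'s "0 on failure" return
 * into a conventional negative error code.
 */
static int host1x_map_gather(struct device *dev, struct sg_table *sgt)
{
	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);

	if (count == 0)
		return -ENOMEM;	/* dma_map_sg() signals failure by returning 0 */

	return count;	/* number of DMA segments actually mapped */
}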