path: root/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_mmu.c')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c  |  534
1 file changed, 354 insertions(+), 180 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index f103e787de94..a992be2ede88 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -1,117 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
*/
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include <drm/drm_print.h>
+
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
-#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
-static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
- unsigned long iova, int flags, void *arg)
+static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
+ unsigned long iova, size_t size)
{
- DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
+ size_t unmapped_page, unmapped = 0;
+ size_t pgsize = SZ_4K;
+
+ while (unmapped < size) {
+ unmapped_page = context->global->ops->unmap(context, iova,
+ pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+}
+
+static int etnaviv_context_map(struct etnaviv_iommu_context *context,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ unsigned long orig_iova = iova;
+ size_t pgsize = SZ_4K;
+ size_t orig_size = size;
+ int ret = 0;
+
+ while (size) {
+ ret = context->global->ops->map(context, iova, paddr, pgsize,
+ prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ etnaviv_context_unmap(context, orig_iova, orig_size - size);
+
+ return ret;
}
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+ u32 iova, unsigned int va_len,
+ struct sg_table *sgt, int prot)
{
- struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
- unsigned int i, j;
+ unsigned int i;
int ret;
- if (!domain || !sgt)
+ if (!context || !sgt)
return -EINVAL;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_dma_address(sg) - sg->offset;
- size_t bytes = sg_dma_len(sg) + sg->offset;
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ phys_addr_t pa = sg_dma_address(sg);
+ unsigned int da_len = sg_dma_len(sg);
+ unsigned int bytes = min_t(unsigned int, da_len, va_len);
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
+
+ if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+ dev_err(context->global->dev,
+ "unaligned: iova 0x%x pa %pa size 0x%x\n",
+ iova, &pa, bytes);
+ ret = -EINVAL;
+ goto fail;
+ }
- ret = iommu_map(domain, da, pa, bytes, prot);
+ ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
goto fail;
+ va_len -= bytes;
da += bytes;
}
+ context->flush_seq++;
+
return 0;
fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg_dma_len(sg) + sg->offset;
-
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
+ etnaviv_context_unmap(context, iova, da - iova);
return ret;
}
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len)
+static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
+ struct sg_table *sgt, unsigned len)
{
- struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned int da = iova;
- int i;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg_dma_len(sg) + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
-
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ etnaviv_context_unmap(context, iova, len);
- BUG_ON(!PAGE_ALIGNED(bytes));
-
- da += bytes;
- }
-
- return 0;
+ context->flush_seq++;
}
-static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
- etnaviv_obj->sgt, etnaviv_obj->base.size);
+ lockdep_assert_held(&context->lock);
+
+ etnaviv_iommu_unmap(context, mapping->vram_node.start,
+ etnaviv_obj->sgt, etnaviv_obj->size);
drm_mm_remove_node(&mapping->vram_node);
}
-static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_iommu_context *context = mapping->context;
+
+ lockdep_assert_held(&context->lock);
+ WARN_ON(mapping->use);
+
+ etnaviv_iommu_remove_mapping(context, mapping);
+ etnaviv_iommu_context_put(mapping->context);
+ mapping->context = NULL;
+ list_del_init(&mapping->mmu_node);
+}
+
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
int ret;
- lockdep_assert_held(&mmu->lock);
+ lockdep_assert_held(&context->lock);
while (1) {
struct etnaviv_vram_mapping *m, *n;
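
The etnaviv_context_map()/etnaviv_context_unmap() helpers introduced above replace direct iommu_map()/iommu_unmap() calls with a page-granular walk over etnaviv's own pagetable ops, unrolling any partial mapping on failure so a caller never sees a half-mapped range. A minimal standalone sketch of that pattern, with a hypothetical mock_ops table and PGSIZE standing in for context->global->ops and SZ_4K:

	/* Sketch only: mock_ops and PGSIZE are illustrative stand-ins,
	 * not driver API. Compiles as freestanding C. */
	#include <stddef.h>

	#define PGSIZE 4096UL

	struct mock_ops {
		int  (*map)(unsigned long iova, unsigned long paddr, size_t len);
		void (*unmap)(unsigned long iova, size_t len);
	};

	static int map_range(const struct mock_ops *ops, unsigned long iova,
			     unsigned long paddr, size_t size)
	{
		size_t done = 0;
		int ret = 0;

		while (done < size) {
			ret = ops->map(iova + done, paddr + done, PGSIZE);
			if (ret)
				break;
			done += PGSIZE;
		}

		/* Unroll on failure: the range ends up either fully
		 * mapped or not mapped at all. */
		while (ret && done) {
			done -= PGSIZE;
			ops->unmap(iova + done, PGSIZE);
		}

		return ret;
	}

etnaviv_context_map() above is the same loop with iova/paddr advanced in place and the unroll delegated to etnaviv_context_unmap().
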
@@ -119,29 +151,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
- ret = drm_mm_insert_node_in_range(&mmu->mm, node,
- size, 0, 0,
- mmu->last_iova, U64_MAX,
- mode);
+ ret = drm_mm_insert_node_in_range(&context->mm, node,
+ size, 0, 0, 0, U64_MAX, mode);
if (ret != -ENOSPC)
break;
- /*
- * If we did not search from the start of the MMU region,
- * try again in case there are free slots.
- */
- if (mmu->last_iova) {
- mmu->last_iova = 0;
- mmu->need_flush = true;
- continue;
- }
-
/* Try to retire some entries */
- drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
+ drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
found = 0;
INIT_LIST_HEAD(&list);
- list_for_each_entry(free, &mmu->mappings, mmu_node) {
+ list_for_each_entry(free, &context->mappings, mmu_node) {
/* If this vram node has not been used, skip this. */
if (!free->vram_node.mm)
continue;
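
The diff elides the middle of etnaviv_iommu_find_iova(); for orientation, the drm_mm eviction-scan protocol it follows looks roughly like the sketch below. This is not the verbatim elided code: locking and the not-found failure path are simplified.

	/* Inside etnaviv_iommu_find_iova(), under context->lock (sketch). */
	struct etnaviv_vram_mapping *free, *m, *n;
	struct drm_mm_scan scan;
	LIST_HEAD(list);
	bool found = false;

	drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
	list_for_each_entry(free, &context->mappings, mmu_node) {
		if (!free->vram_node.mm)	/* never inserted into the mm */
			continue;
		if (free->use)			/* pinned by an in-flight job */
			continue;

		list_add(&free->scan_node, &list);
		if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
			found = true;		/* a large-enough hole exists */
			break;
		}
	}

	/*
	 * Every block handed to the scan must be removed again before
	 * drm_mm can be used; a false return means the block is not part
	 * of the hole and may keep its mapping.
	 */
	list_for_each_entry_safe(m, n, &list, scan_node)
		if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
			list_del_init(&m->scan_node);

Whatever survives on the scan list is then reaped, as the next hunk shows.
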
@@ -183,9 +203,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* this mapping.
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
- etnaviv_iommu_remove_mapping(mmu, m);
- m->mmu = NULL;
- list_del_init(&m->mmu_node);
+ etnaviv_iommu_reap_mapping(m);
list_del_init(&m->scan_node);
}
@@ -200,9 +218,54 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
return ret;
}
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+ struct drm_mm_node *node, size_t size, u64 va)
+{
+ struct etnaviv_vram_mapping *m, *n;
+ struct drm_mm_node *scan_node;
+ LIST_HEAD(scan_list);
+ int ret;
+
+ lockdep_assert_held(&context->lock);
+
+ ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /*
+ * When we can't insert the node, due to an existing mapping blocking
+ * the address space, there are two possible reasons:
+ * 1. Userspace genuinely messed up and tried to reuse address space
+ * before the last job using this VMA has finished executing.
+ * 2. The existing buffer mappings are idle, but the buffers are not
+ * destroyed yet (likely due to being referenced by another context), in
+ * which case the mappings will not be cleaned up and we must reap them
+ * here to make space for the new mapping.
+ */
+
+ drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
+ m = container_of(scan_node, struct etnaviv_vram_mapping,
+ vram_node);
+
+ if (m->use)
+ return -ENOSPC;
+
+ list_add(&m->scan_node, &scan_list);
+ }
+
+ list_for_each_entry_safe(m, n, &scan_list, scan_node) {
+ etnaviv_iommu_reap_mapping(m);
+ list_del_init(&m->scan_node);
+ }
+
+ return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
- struct etnaviv_vram_mapping *mapping)
+ struct etnaviv_vram_mapping *mapping, u64 va)
{
struct sg_table *sgt = etnaviv_obj->sgt;
struct drm_mm_node *node;
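
etnaviv_iommu_insert_exact() is the kernel half of softpin: userspace picks the GPU VA and the kernel must either honour it or fail. A hedged userspace-side sketch of how such an exact VA reaches this code follows; the ETNA_SUBMIT_SOFTPIN flag and the per-BO 'presumed' field are quoted from the etnaviv UAPI as best recalled, so verify them against include/uapi/drm/etnaviv_drm.h:

	/* Userspace sketch (assumed UAPI names; see etnaviv_drm.h). */
	#include <string.h>
	#include <drm/etnaviv_drm.h>

	static void fill_softpin_bo(struct drm_etnaviv_gem_submit_bo *bo,
				    __u32 handle, __u64 gpu_va)
	{
		memset(bo, 0, sizeof(*bo));
		bo->handle = handle;
		bo->flags = ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE;
		/*
		 * With ETNA_SUBMIT_SOFTPIN set in drm_etnaviv_gem_submit.flags,
		 * 'presumed' is a demand, not a hint: the kernel ends up in
		 * etnaviv_iommu_insert_exact() trying to make this VA work,
		 * reaping idle-but-undestroyed mappings if they block it.
		 */
		bo->presumed = gpu_va;
	}
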
@@ -210,177 +273,288 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
lockdep_assert_held(&etnaviv_obj->lock);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* v1 MMU can optimize single entry (contiguous) scatterlists */
- if (mmu->version == ETNAVIV_IOMMU_V1 &&
+ if (context->global->version == ETNAVIV_IOMMU_V1 &&
sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
u32 iova;
iova = sg_dma_address(sgt->sgl) - memory_base;
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mutex_unlock(&mmu->lock);
- return 0;
+ mapping->context = etnaviv_iommu_context_get(context);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ ret = 0;
+ goto unlock;
}
}
node = &mapping->vram_node;
- ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
- if (ret < 0) {
- mutex_unlock(&mmu->lock);
- return ret;
- }
+ if (va)
+ ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
+ else
+ ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
+ if (ret < 0)
+ goto unlock;
- mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
- ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
- IOMMU_READ | IOMMU_WRITE);
+ ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
+ ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
drm_mm_remove_node(node);
- mutex_unlock(&mmu->lock);
- return ret;
+ goto unlock;
}
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
+ mapping->context = etnaviv_iommu_context_get(context);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+unlock:
+ mutex_unlock(&context->lock);
return ret;
}
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
WARN_ON(mapping->use);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
+
+ /* Bail if the mapping has been reaped by another thread */
+ if (!mapping->context) {
+ mutex_unlock(&context->lock);
+ return;
+ }
/* If the vram node is on the mm, unmap and remove the node */
- if (mapping->vram_node.mm == &mmu->mm)
- etnaviv_iommu_remove_mapping(mmu, mapping);
+ if (mapping->vram_node.mm == &context->mm)
+ etnaviv_iommu_remove_mapping(context, mapping);
list_del(&mapping->mmu_node);
- mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
+ etnaviv_iommu_context_put(context);
}
-void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+static void etnaviv_iommu_context_free(struct kref *kref)
{
- drm_mm_takedown(&mmu->mm);
- iommu_domain_free(mmu->domain);
- kfree(mmu);
-}
+ struct etnaviv_iommu_context *context =
+ container_of(kref, struct etnaviv_iommu_context, refcount);
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
+ etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
+ mutex_destroy(&context->lock);
+ context->global->ops->free(context);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
- enum etnaviv_iommu_version version;
- struct etnaviv_iommu *mmu;
+ kref_put(&context->refcount, etnaviv_iommu_context_free);
+}
- mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
- if (!mmu)
- return ERR_PTR(-ENOMEM);
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc)
+{
+ struct etnaviv_iommu_context *ctx;
+ int ret;
- if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
- mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V1;
- } else {
- mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V2;
- }
+ if (global->version == ETNAVIV_IOMMU_V1)
+ ctx = etnaviv_iommuv1_context_alloc(global);
+ else
+ ctx = etnaviv_iommuv2_context_alloc(global);
- if (!mmu->domain) {
- dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
- kfree(mmu);
- return ERR_PTR(-ENOMEM);
- }
+ if (!ctx)
+ return NULL;
- mmu->gpu = gpu;
- mmu->version = version;
- mutex_init(&mmu->lock);
- INIT_LIST_HEAD(&mmu->mappings);
+ ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
+ global->memory_base);
+ if (ret)
+ goto out_free;
- drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
- mmu->domain->geometry.aperture_end -
- mmu->domain->geometry.aperture_start + 1);
+ if (global->version == ETNAVIV_IOMMU_V1 &&
+ ctx->cmdbuf_mapping.iova > 0x80000000) {
+ dev_err(global->dev,
+ "command buffer outside valid memory window\n");
+ goto out_unmap;
+ }
- iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
+ return ctx;
- return mmu;
+out_unmap:
+ etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+ global->ops->free(ctx);
+ return NULL;
}
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
- etnaviv_iommuv1_restore(gpu);
- else
- etnaviv_iommuv2_restore(gpu);
+ context->global->ops->restore(gpu, context);
}
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
- struct drm_mm_node *vram_node, size_t size,
- u32 *iova)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ mutex_lock(&context->lock);
- if (mmu->version == ETNAVIV_IOMMU_V1) {
- *iova = paddr - gpu->memory_base;
+ if (mapping->use > 0) {
+ mapping->use++;
+ mutex_unlock(&context->lock);
return 0;
+ }
+
+ /*
+ * For MMUv1 we don't add the suballoc region to the pagetables, as
+ * those GPUs can only work with cmdbufs accessed through the linear
+ * window. Instead we manufacture a mapping to make it look uniform
+ * to the upper layers.
+ */
+ if (context->global->version == ETNAVIV_IOMMU_V1) {
+ mapping->iova = paddr - memory_base;
} else {
+ struct drm_mm_node *node = &mapping->vram_node;
int ret;
- mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
+ ret = etnaviv_iommu_find_iova(context, node, size);
if (ret < 0) {
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
- ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
- IOMMU_READ);
+
+ mapping->iova = node->start;
+ ret = etnaviv_context_map(context, node->start, paddr, size,
+ ETNAVIV_PROT_READ);
if (ret < 0) {
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
return ret;
}
- mmu->last_iova = vram_node->start + size;
- gpu->mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
- *iova = (u32)vram_node->start;
- return 0;
+ context->flush_seq++;
}
+
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ mapping->use = 1;
+
+ mutex_unlock(&context->lock);
+
+ return 0;
}
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
- struct drm_mm_node *vram_node, size_t size,
- u32 iova)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ struct drm_mm_node *node = &mapping->vram_node;
+
+ mutex_lock(&context->lock);
+ mapping->use--;
- if (mmu->version == ETNAVIV_IOMMU_V2) {
- mutex_lock(&mmu->lock);
- iommu_unmap(mmu->domain,iova, size);
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
+ mutex_unlock(&context->lock);
+ return;
}
+
+ etnaviv_context_unmap(context, node->start, node->size);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
}
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
+{
+ return context->global->ops->dump_size(context);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
- struct etnaviv_iommu_ops *ops;
+ context->global->ops->dump(context, buf);
+}
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
+{
+ enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global;
+ struct device *dev = gpu->drm->dev;
+
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
+ version = ETNAVIV_IOMMU_V2;
+
+ if (priv->mmu_global) {
+ if (priv->mmu_global->version != version) {
+ dev_err(gpu->dev,
+ "MMU version doesn't match global version\n");
+ return -ENXIO;
+ }
+
+ priv->mmu_global->use++;
+ return 0;
+ }
+
+ global = kzalloc(sizeof(*global), GFP_KERNEL);
+ if (!global)
+ return -ENOMEM;
- ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+ global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
+ GFP_KERNEL);
+ if (!global->bad_page_cpu)
+ goto free_global;
- return ops->dump_size(iommu->domain);
+ memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
+
+ if (version == ETNAVIV_IOMMU_V2) {
+ global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
+ &global->v2.pta_dma, GFP_KERNEL);
+ if (!global->v2.pta_cpu)
+ goto free_bad_page;
+ }
+
+ global->dev = dev;
+ global->version = version;
+ global->use = 1;
+ mutex_init(&global->lock);
+
+ if (version == ETNAVIV_IOMMU_V1)
+ global->ops = &etnaviv_iommuv1_ops;
+ else
+ global->ops = &etnaviv_iommuv2_ops;
+
+ priv->mmu_global = global;
+
+ return 0;
+
+free_bad_page:
+ dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
+free_global:
+ kfree(global);
+
+ return -ENOMEM;
}
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
- struct etnaviv_iommu_ops *ops;
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global = priv->mmu_global;
+
+ if (!global)
+ return;
+
+ if (--global->use > 0)
+ return;
+
+ if (global->v2.pta_cpu)
+ dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
+ global->v2.pta_cpu, global->v2.pta_dma);
+
+ if (global->bad_page_cpu)
+ dma_free_wc(global->dev, SZ_4K,
+ global->bad_page_cpu, global->bad_page_dma);
- ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+ mutex_destroy(&global->lock);
+ kfree(global);
- ops->dump(iommu->domain, buf);
+ priv->mmu_global = NULL;
}