Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_mmu.c')
-rw-r--r-- | drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 64 | ++++++++++++++++++++++++----------------------------------------
1 file changed, 24 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 4fa72567183a..df5192083b20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
 	size_t unmapped_page, unmapped = 0;
 	size_t pgsize = SZ_4K;
 
-	if (!IS_ALIGNED(iova | size, pgsize)) {
-		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
-		       iova, size, pgsize);
-		return;
-	}
-
 	while (unmapped < size) {
 		unmapped_page = context->global->ops->unmap(context, iova,
 							    pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	size_t orig_size = size;
 	int ret = 0;
 
-	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
-		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
-		       iova, &paddr, size, pgsize);
-		return -EINVAL;
-	}
-
 	while (size) {
 		ret = context->global->ops->map(context, iova, paddr, pgsize,
 						prot);
@@ -69,9 +57,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
-			     struct sg_table *sgt, unsigned len, int prot)
-{	struct scatterlist *sg;
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+			     u32 iova, unsigned int va_len,
+			     struct sg_table *sgt, int prot)
+{
+	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i;
 	int ret;
@@ -80,15 +70,25 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 		return -EINVAL;
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
-		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		size_t bytes = sg_dma_len(sg) + sg->offset;
+		phys_addr_t pa = sg_dma_address(sg);
+		unsigned int da_len = sg_dma_len(sg);
+		unsigned int bytes = min_t(unsigned int, da_len, va_len);
+
+		VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
 
-		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+		if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+			dev_err(context->global->dev,
+				"unaligned: iova 0x%x pa %pa size 0x%x\n",
+				iova, &pa, bytes);
+			ret = -EINVAL;
+			goto fail;
+		}
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
 			goto fail;
 
+		va_len -= bytes;
 		da += bytes;
 	}
 
@@ -104,21 +104,7 @@ fail:
 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 				struct sg_table *sgt, unsigned len)
 {
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sgtable_dma_sg(sgt, sg, i) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, len);
 
 	context->flush_seq++;
 }
@@ -131,7 +117,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
 	lockdep_assert_held(&context->lock);
 
 	etnaviv_iommu_unmap(context, mapping->vram_node.start,
-			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+			    etnaviv_obj->sgt, etnaviv_obj->size);
 	drm_mm_remove_node(&mapping->vram_node);
 }
 
@@ -305,16 +291,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 	node = &mapping->vram_node;
 
 	if (va)
-		ret = etnaviv_iommu_insert_exact(context, node,
-						 etnaviv_obj->base.size, va);
+		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
 	else
-		ret = etnaviv_iommu_find_iova(context, node,
-					      etnaviv_obj->base.size);
+		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
 	if (ret < 0)
 		goto unlock;
 
 	mapping->iova = node->start;
-	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
+	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {
@@ -358,7 +342,7 @@ static void etnaviv_iommu_context_free(struct kref *kref)
 		container_of(kref, struct etnaviv_iommu_context, refcount);
 
 	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
-
+	mutex_destroy(&context->lock);
 	context->global->ops->free(context);
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
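
For readers skimming the change, the core of the rework is the mapping loop: instead of folding sg->offset into each chunk, the new etnaviv_iommu_map() walks the DMA scatterlist, clamps every entry's length to the remaining VA window (va_len), checks alignment per entry, and shrinks the window as it maps. Below is a minimal standalone sketch of that loop shape in plain userspace C; the names (sg_entry, map_range, PAGE_SZ) are illustrative stand-ins, not kernel or etnaviv APIs.

/*
 * Standalone sketch (userspace, not driver code): loosely models the shape
 * of the reworked mapping loop. Each scatterlist entry's DMA length is
 * clamped to the remaining VA window, unaligned chunks are rejected, and
 * the window shrinks as chunks are mapped.
 */
#include <stdio.h>

#define PAGE_SZ 4096u

struct sg_entry {
	unsigned long long dma_addr;	/* bus address of this chunk */
	unsigned int dma_len;		/* chunk length in bytes */
};

static int map_range(unsigned int iova, unsigned int va_len,
		     const struct sg_entry *sg, unsigned int nents)
{
	unsigned int da = iova;
	unsigned int i;

	for (i = 0; i < nents && va_len; i++) {
		unsigned long long pa = sg[i].dma_addr;
		unsigned int bytes = sg[i].dma_len < va_len ? sg[i].dma_len : va_len;

		/* every mapped chunk must stay page aligned */
		if ((da | pa | bytes) & (PAGE_SZ - 1))
			return -1;

		printf("map[%u]: va 0x%08x -> pa 0x%08llx (+0x%x)\n",
		       i, da, pa, bytes);

		va_len -= bytes;
		da += bytes;
	}

	return 0;
}

int main(void)
{
	/* VA window smaller than the backing pages: the last entry is clamped */
	const struct sg_entry sgl[] = {
		{ 0x80000000ull, 2 * PAGE_SZ },
		{ 0x80100000ull, 4 * PAGE_SZ },
	};

	return map_range(0x10000000u, 3 * PAGE_SZ, sgl, 2) ? 1 : 0;
}

Clamping each chunk to va_len appears to be what lets the unmap side collapse to a single etnaviv_context_unmap() call over the whole range, since the mapped span can no longer exceed the size passed in by the caller.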