| author | Alex Mastro <amastro@fb.com> | 2025-10-28 09:15:01 -0700 |
|---|---|---|
| committer | Alex Williamson <alex@shazbot.org> | 2025-10-28 15:54:41 -0600 |
| commit | 1196f1f897d4ee64d8844e8cfa97c8f93e4d158c | |
| tree | 9c409a944f0fa493fa98d4164b666f9f49ef4224 | |
| parent | 6012379ede6aa7477db6276bb9876fe7d67c4312 | |
vfio/type1: move iova increment to unmap_unpin_*() caller
Move the iova increment out of these functions and into their caller, as
part of preparing to handle end-of-address-space map/unmap.
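As a rough illustration of the calling convention this moves toward, here is a minimal, self-contained C sketch. The names (`unmap_chunk()`, `unmap_range()`, `CHUNK`) are hypothetical stand-ins, not the kernel API: the helper takes iova by value and only reports how much it unmapped, so the caller's single `iova += unmapped` is the one place the cursor advances.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel typedef */

#define CHUNK	4096		/* pretend we can unmap one page at a time */

/* Hypothetical helper standing in for unmap_unpin_fast()/_slow() after the
 * patch: iova is passed by value and the helper merely reports how much it
 * unmapped; it no longer advances the caller's cursor. */
static size_t unmap_chunk(dma_addr_t iova, size_t len)
{
	size_t unmapped = len < CHUNK ? len : CHUNK;

	printf("unmap [%#llx, %#llx)\n",
	       (unsigned long long)iova,
	       (unsigned long long)(iova + unmapped));
	return unmapped;
}

/* Caller-side loop in the style of vfio_unmap_unpin() after the patch: the
 * single `iova += unmapped` below is the only place the cursor moves, which
 * is what later end-of-address-space handling can build on. */
static void unmap_range(dma_addr_t iova, size_t len)
{
	while (len) {
		size_t unmapped = unmap_chunk(iova, len);

		if (!unmapped)
			break;

		len -= unmapped;
		iova += unmapped;
	}
}

int main(void)
{
	unmap_range(0x100000, 3 * CHUNK);
	return 0;
}
```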
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Fixes: 73fa0d10d077 ("vfio: Type1 IOMMU implementation")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251028-fix-unmap-v6-2-2542b96bcc8e@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
| -rw-r--r-- | drivers/vfio/vfio_iommu_type1.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 91b1480b7a37..48bcc0633d44 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1083,7 +1083,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
 #define VFIO_IOMMU_TLB_SYNC_MAX         512
 
 static size_t unmap_unpin_fast(struct vfio_domain *domain,
-                               struct vfio_dma *dma, dma_addr_t *iova,
+                               struct vfio_dma *dma, dma_addr_t iova,
                                size_t len, phys_addr_t phys, long *unlocked,
                                struct list_head *unmapped_list,
                                int *unmapped_cnt,
@@ -1093,18 +1093,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
         struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 
         if (entry) {
-                unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+                unmapped = iommu_unmap_fast(domain->domain, iova, len,
                                             iotlb_gather);
 
                 if (!unmapped) {
                         kfree(entry);
                 } else {
-                        entry->iova = *iova;
+                        entry->iova = iova;
                         entry->phys = phys;
                         entry->len = unmapped;
                         list_add_tail(&entry->list, unmapped_list);
 
-                        *iova += unmapped;
                         (*unmapped_cnt)++;
                 }
         }
@@ -1123,18 +1122,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
 }
 
 static size_t unmap_unpin_slow(struct vfio_domain *domain,
-                               struct vfio_dma *dma, dma_addr_t *iova,
+                               struct vfio_dma *dma, dma_addr_t iova,
                                size_t len, phys_addr_t phys,
                                long *unlocked)
 {
-        size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+        size_t unmapped = iommu_unmap(domain->domain, iova, len);
 
         if (unmapped) {
-                *unlocked += vfio_unpin_pages_remote(dma, *iova,
+                *unlocked += vfio_unpin_pages_remote(dma, iova,
                                                      phys >> PAGE_SHIFT,
                                                      unmapped >> PAGE_SHIFT,
                                                      false);
-                *iova += unmapped;
                 cond_resched();
         }
         return unmapped;
@@ -1197,16 +1195,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                  * First, try to use fast unmap/unpin. In case of failure,
                  * switch to slow unmap/unpin path.
                  */
-                unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+                unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,
                                             &unlocked, &unmapped_region_list,
                                             &unmapped_region_cnt,
                                             &iotlb_gather);
                 if (!unmapped) {
-                        unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+                        unmapped = unmap_unpin_slow(domain, dma, iova, len,
                                                     phys, &unlocked);
                         if (WARN_ON(!unmapped))
                                 break;
                 }
+
+                iova += unmapped;
         }
 
         dma->iommu_mapped = false;
