| author | Jason Gunthorpe <jgg@nvidia.com> | 2025-04-14 10:46:39 -0300 |
|---|---|---|
| committer | Alex Williamson <alex.williamson@redhat.com> | 2025-05-19 12:39:27 -0600 |
| commit | 07970d048cddeb56fa17925d7c37cb2400322aab (patch) | |
| tree | 78d3e7173bd635a3d67bdfbd6c5ff81f65565346 | |
| parent | a5806cd506af5a7c19bcd596e4708b5c464bfd21 (diff) | |
vfio/type1: Remove Fine Grained Superpages detection
VFIO is looking to enable an optimization where it can rely on a fast
unmap operation that returns the size of a larger IOPTE.
Due to how the test was constructed, this would only ever succeed on the
AMDv1 page table, which supports an 8k contiguous size. Nothing else
supports this.
Alex says the performance win was fairly minor, so let's remove this
code. Always use iommu_iova_to_phys() to extend contiguous pages.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/0-v2-97fa1da8d983+412-vfio_fgsp_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
| -rw-r--r-- | drivers/vfio/vfio_iommu_type1.c | 49 |
1 file changed, 1 insertion(+), 48 deletions(-)
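
The path the patch keeps is the loop in vfio_unmap_unpin(), visible in the diff below: it grows the range to unmap one page at a time, asking iommu_iova_to_phys() whether the next IOVA is still physically contiguous. Here is a minimal standalone sketch of that walk; the mock page table, mock_iova_to_phys(), and contiguous_len() are illustrative stand-ins, not kernel symbols.

```c
/*
 * Standalone sketch of the contiguous-chunk walk that vfio_unmap_unpin()
 * now always performs.  The kernel version queries a live IOMMU domain;
 * here a small array stands in for the page table.  All names are
 * illustrative, not kernel symbols.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

/* Mock page table: physical address per 4k IOVA slot, 0 = unmapped. */
static uint64_t pt[8] = {
	0x100000, 0x101000, 0x102000,	/* three contiguous pages */
	0x200000, 0x201000, 0, 0, 0,	/* then a physical discontinuity */
};

static uint64_t mock_iova_to_phys(uint64_t iova)
{
	return pt[iova / PAGE_SIZE];
}

/* Mirrors the loop kept by the patch: grow len while physically contiguous. */
static uint64_t contiguous_len(uint64_t iova, uint64_t end)
{
	uint64_t phys = mock_iova_to_phys(iova);
	uint64_t len, next;

	for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) {
		next = mock_iova_to_phys(iova + len);
		if (next != phys + len)
			break;
	}
	return len;
}

int main(void)
{
	/* Expect 12288: IOVA 0..0x2fff maps to contiguous 0x100000..0x102fff. */
	printf("contiguous: %llu bytes\n",
	       (unsigned long long)contiguous_len(0, 8 * PAGE_SIZE));
	return 0;
}
```

Running it prints 12288 bytes: the first three slots are physically contiguous, so a single unmap call could cover all three pages.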
```diff
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ac56072af9f..afc1449335c3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -80,7 +80,6 @@ struct vfio_domain {
 	struct iommu_domain	*domain;
 	struct list_head	next;
 	struct list_head	group_list;
-	bool			fgsp : 1;	/* Fine-grained super pages */
 	bool			enforce_cache_coherency : 1;
 };
 
@@ -1095,8 +1094,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		 * may require hardware cache flushing, try to find the
 		 * largest contiguous physical memory chunk to unmap.
 		 */
-		for (len = PAGE_SIZE;
-		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+		for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) {
 			next = iommu_iova_to_phys(domain->domain, iova + len);
 			if (next != phys + len)
 				break;
@@ -1833,49 +1831,6 @@ unwind:
 	return ret;
 }
 
-/*
- * We change our unmap behavior slightly depending on whether the IOMMU
- * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
- * for practically any contiguous power-of-two mapping we give it.  This means
- * we don't need to look for contiguous chunks ourselves to make unmapping
- * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
- * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
- * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
- * hugetlbfs is in use.
- */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
-{
-	int ret, order = get_order(PAGE_SIZE * 2);
-	struct vfio_iova *region;
-	struct page *pages;
-	dma_addr_t start;
-
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!pages)
-		return;
-
-	list_for_each_entry(region, regions, list) {
-		start = ALIGN(region->start, PAGE_SIZE * 2);
-		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
-			continue;
-
-		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
-				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
-				GFP_KERNEL_ACCOUNT);
-		if (!ret) {
-			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
-
-			if (unmapped == PAGE_SIZE)
-				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
-			else
-				domain->fgsp = true;
-		}
-		break;
-	}
-
-	__free_pages(pages, order);
-}
-
 static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
 						 struct iommu_group *iommu_group)
 {
@@ -2314,8 +2269,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
-	vfio_test_domain_fgsp(domain, &iova_copy);
-
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
 	if (ret)
```
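
For context on what was deleted: the probe relied on iommu_unmap() returning the number of bytes it actually unmapped. After mapping two physically contiguous pages, it asked to unmap only the first; getting back more than PAGE_SIZE meant the IOMMU had folded both pages into a single superpage IOPTE, and per the commit message only AMDv1's 8k contiguous IOPTE ever did that. A minimal simulation of that classification, with mock_unmap() as an illustrative stand-in for iommu_unmap()'s return value (nothing here is kernel API):

```c
/*
 * Illustration of the classification the removed vfio_test_domain_fgsp()
 * performed.  iommu_unmap() returns the number of bytes actually unmapped;
 * after mapping two physically contiguous pages, a request to unmap just
 * the first either splits the mapping (PAGE_SIZE comes back) or tears down
 * a folded superpage IOPTE (more comes back).  mock_unmap() is an
 * illustrative stand-in for that return value, not kernel API.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* A fine-grained-superpage IOMMU (AMDv1) folds two contiguous 4k pages
 * into one 8k IOPTE, so a 4k unmap request removes the whole 8k entry. */
static size_t mock_unmap(bool fgsp_hw, size_t requested)
{
	return fgsp_hw ? 2 * PAGE_SIZE : requested;
}

int main(void)
{
	for (int hw = 0; hw <= 1; hw++) {
		size_t unmapped = mock_unmap(hw, PAGE_SIZE);

		/* Same test the removed code used to set domain->fgsp. */
		printf("hw=%d -> fgsp=%s\n", hw,
		       unmapped == PAGE_SIZE ? "false" : "true");
	}
	return 0;
}
```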
