Diffstat (limited to 'tools/testing/selftests/vfio/vfio_dma_mapping_test.c')
 -rw-r--r--  tools/testing/selftests/vfio/vfio_dma_mapping_test.c | 129
 1 file changed, 121 insertions(+), 8 deletions(-)
diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
index ab19c54a774d..16eba2ecca47 100644
--- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
+++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
@@ -3,14 +3,16 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
 #include <linux/limits.h>
 #include <linux/mman.h>
 #include <linux/sizes.h>
 #include <linux/vfio.h>
 
-#include <vfio_util.h>
+#include <libvfio.h>
 
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
 
 static const char *device_bdf;
 
@@ -92,7 +94,9 @@ static int iommu_mapping_get(const char *bdf, u64 iova,
 }
 
 FIXTURE(vfio_dma_mapping_test) {
+	struct iommu *iommu;
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 };
 
 FIXTURE_VARIANT(vfio_dma_mapping_test) {
@@ -112,23 +116,30 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
 FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
 FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);
 
+#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
+
 FIXTURE_SETUP(vfio_dma_mapping_test)
 {
-	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iommu = iommu_init(variant->iommu_mode);
+	self->device = vfio_pci_device_init(device_bdf, self->iommu);
+	self->iova_allocator = iova_allocator_init(self->iommu);
 }
 
 FIXTURE_TEARDOWN(vfio_dma_mapping_test)
 {
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
+	iommu_cleanup(self->iommu);
 }
 
 TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 {
 	const u64 size = variant->size ?: getpagesize();
 	const int flags = variant->mmap_flags;
-	struct vfio_dma_region region;
+	struct dma_region region;
 	struct iommu_mapping mapping;
 	u64 mapping_size = size;
+	u64 unmapped;
 	int rc;
 
 	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
@@ -139,10 +150,10 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 	else
 		ASSERT_NE(region.vaddr, MAP_FAILED);
 
-	region.iova = (u64)region.vaddr;
+	region.iova = iova_allocator_alloc(self->iova_allocator, size);
 	region.size = size;
 
-	vfio_pci_dma_map(self->device, &region);
+	iommu_map(self->iommu, &region);
 	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);
 	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));
 
@@ -184,14 +195,116 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 	}
 
 unmap:
-	vfio_pci_dma_unmap(self->device, &region);
+	rc = __iommu_unmap(self->iommu, &region, &unmapped);
+	ASSERT_EQ(rc, 0);
+	ASSERT_EQ(unmapped, region.size);
 	printf("Unmapped IOVA 0x%lx\n", region.iova);
-	ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
+	ASSERT_NE(0, __to_iova(self->device, region.vaddr, NULL));
 	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));
 
 	ASSERT_TRUE(!munmap(region.vaddr, size));
 }
 
+FIXTURE(vfio_dma_map_limit_test) {
+	struct iommu *iommu;
+	struct vfio_pci_device *device;
+	struct dma_region region;
+	size_t mmap_size;
+};
+
+FIXTURE_VARIANT(vfio_dma_map_limit_test) {
+	const char *iommu_mode;
+};
+
+#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)			\
+FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {		\
+	.iommu_mode = #_iommu_mode,					\
+}
+
+FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
+
+#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
+
+FIXTURE_SETUP(vfio_dma_map_limit_test)
+{
+	struct dma_region *region = &self->region;
+	struct iommu_iova_range *ranges;
+	u64 region_size = getpagesize();
+	iova_t last_iova;
+	u32 nranges;
+
+	/*
+	 * Over-allocate mmap by double the size to provide enough backing vaddr
+	 * for overflow tests
+	 */
+	self->mmap_size = 2 * region_size;
+
+	self->iommu = iommu_init(variant->iommu_mode);
+	self->device = vfio_pci_device_init(device_bdf, self->iommu);
+	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
+			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(region->vaddr, MAP_FAILED);
+
+	ranges = iommu_iova_ranges(self->iommu, &nranges);
+	VFIO_ASSERT_NOT_NULL(ranges);
+	last_iova = ranges[nranges - 1].last;
+	free(ranges);
+
+	/* One page prior to the last iova */
+	region->iova = last_iova & ~(region_size - 1);
+	region->size = region_size;
+}
+
+FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
+{
+	vfio_pci_device_cleanup(self->device);
+	iommu_cleanup(self->iommu);
+	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
+}
+
+TEST_F(vfio_dma_map_limit_test, unmap_range)
+{
+	struct dma_region *region = &self->region;
+	u64 unmapped;
+	int rc;
+
+	iommu_map(self->iommu, region);
+	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
+
+	rc = __iommu_unmap(self->iommu, region, &unmapped);
+	ASSERT_EQ(rc, 0);
+	ASSERT_EQ(unmapped, region->size);
+}
+
+TEST_F(vfio_dma_map_limit_test, unmap_all)
+{
+	struct dma_region *region = &self->region;
+	u64 unmapped;
+	int rc;
+
+	iommu_map(self->iommu, region);
+	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
+
+	rc = __iommu_unmap_all(self->iommu, &unmapped);
+	ASSERT_EQ(rc, 0);
+	ASSERT_EQ(unmapped, region->size);
+}
+
+TEST_F(vfio_dma_map_limit_test, overflow)
+{
+	struct dma_region *region = &self->region;
+	int rc;
+
+	region->iova = ~(iova_t)0 & ~(region->size - 1);
+	region->size = self->mmap_size;
+
+	rc = __iommu_map(self->iommu, region);
+	ASSERT_EQ(rc, -EOVERFLOW);
+
+	rc = __iommu_unmap(self->iommu, region, NULL);
+	ASSERT_EQ(rc, -EOVERFLOW);
+}
+
 int main(int argc, char *argv[])
 {
 	device_bdf = vfio_selftests_get_bdf(&argc, argv);
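
Usage note (not part of the patch): the hunks above switch the test from the old vfio_pci_dma_map()/vfio_pci_dma_unmap() helpers to the libvfio iommu_*() API, where an iommu object is created first, the device attaches to it, and IOVAs come from an iova allocator. The sketch below shows that flow outside the kselftest harness. Only the helper names and the struct dma_region fields are taken from the diff; the "iommufd" mode string, the minimal error handling, and the assumption that <libvfio.h> pulls in the u64/iova_t types are illustrative guesses.

/* Sketch only: exercises the libvfio helpers used by the patch above. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#include <libvfio.h>

int main(int argc, char *argv[])
{
	const char *bdf = vfio_selftests_get_bdf(&argc, argv);
	struct iova_allocator *iova_allocator;
	struct vfio_pci_device *device;
	struct dma_region region;
	struct iommu *iommu;
	u64 unmapped;

	/* "iommufd" is an assumed iommu_mode string for this sketch. */
	iommu = iommu_init("iommufd");
	device = vfio_pci_device_init(bdf, iommu);
	iova_allocator = iova_allocator_init(iommu);

	/* Back one page of anonymous memory and pick an IOVA for it. */
	region.size = getpagesize();
	region.vaddr = mmap(NULL, region.size, PROT_READ | PROT_WRITE,
			    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	region.iova = iova_allocator_alloc(iova_allocator, region.size);

	/* Map, then unmap and check the unmapped size round-trips. */
	iommu_map(iommu, &region);
	printf("mapped %p at IOVA 0x%lx\n", region.vaddr, region.iova);

	if (__iommu_unmap(iommu, &region, &unmapped) || unmapped != region.size)
		fprintf(stderr, "unmap did not cover the whole region\n");

	munmap(region.vaddr, region.size);
	iova_allocator_cleanup(iova_allocator);
	vfio_pci_device_cleanup(device);
	iommu_cleanup(iommu);
	return 0;
}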
