author		Linus Torvalds <torvalds@linux-foundation.org>	2024-11-22 19:55:10 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-11-22 19:55:10 -0800
commit		ceba6f6f33f29ab838b23a567621b847e527d085 (patch)
tree		b6d72001108e6ca83bf28458470c1699ce5cb338 /drivers/iommu/io-pgtable-arm.c
parent		eb78332b1067776ca4a474ccfd92460014e8d8e3 (diff)
parent		42f0cbb2a253bcd7d4f20e80462014622f19d88e (diff)
Merge tag 'iommu-updates-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux
Pull iommu updates from Joerg Roedel:
 "Core Updates:
   - Convert call-sites using iommu_domain_alloc() to more specific
     versions and remove function
   - Introduce iommu_paging_domain_alloc_flags()
   - Extend support for allocating PASID-capable domains to more
     drivers
   - Remove iommu_present()
   - Some smaller improvements

  New IOMMU driver for RISC-V

  Intel VT-d Updates:
   - Add domain_alloc_paging support
   - Enable user space IOPFs in non-PASID and non-svm cases
   - Small code refactoring and cleanups
   - Add domain replacement support for pasid

  AMD-Vi Updates:
   - Adapt to iommu_paging_domain_alloc_flags() interface and alloc V2
     page-tables by default
   - Replace custom domain ID allocator with IDA allocator
   - Add ops->release_domain() support
   - Other improvements to device attach and domain allocation code
     paths

  ARM-SMMU Updates:
   - SMMUv2:
      - Return -EPROBE_DEFER for client devices probing before their
        SMMU
      - Devicetree binding updates for Qualcomm MMU-500 implementations
   - SMMUv3:
      - Minor fixes and cleanup for NVIDIA's virtual command queue
        driver
   - IO-PGTable:
      - Fix indexing of concatenated PGDs and extend selftest coverage
      - Remove unused block-splitting support

  S390 IOMMU:
   - Implement support for blocking domain

  Mediatek IOMMU:
   - Enable 35-bit physical address support for mt8186

  OMAP IOMMU driver:
   - Adapt to recent IOMMU core changes and unbreak driver"
* tag 'iommu-updates-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux: (92 commits)
iommu/tegra241-cmdqv: Fix alignment failure at max_n_shift
iommu: Make set_dev_pasid op support domain replacement
iommu/arm-smmu-v3: Make set_dev_pasid() op support replace
iommu/vt-d: Add set_dev_pasid callback for nested domain
iommu/vt-d: Make identity_domain_set_dev_pasid() to handle domain replacement
iommu/vt-d: Make intel_svm_set_dev_pasid() support domain replacement
iommu/vt-d: Limit intel_iommu_set_dev_pasid() for paging domain
iommu/vt-d: Make intel_iommu_set_dev_pasid() to handle domain replacement
iommu/vt-d: Add iommu_domain_did() to get did
iommu/vt-d: Consolidate the struct dev_pasid_info add/remove
iommu/vt-d: Add pasid replace helpers
iommu/vt-d: Refactor the pasid setup helpers
iommu/vt-d: Add a helper to flush cache for updating present pasid entry
iommu: Pass old domain to set_dev_pasid op
iommu/iova: Fix typo 'adderss'
iommu: Add a kdoc to iommu_unmap()
iommu/io-pgtable-arm-v7s: Remove split on unmap behavior
iommu/io-pgtable-arm: Remove split on unmap behavior
iommu/vt-d: Drain PRQs when domain removed from RID
iommu/vt-d: Drop pasid requirement for prq initialization
...
Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
-rw-r--r--	drivers/iommu/io-pgtable-arm.c	114
1 file changed, 33 insertions(+), 81 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 74f58c6ac30c..6b9bb58a414f 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -211,6 +211,18 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
 }
 
+/*
+ * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
+ * a concatenated PGD, into the maximum number of entries that can be
+ * mapped in the same table page.
+ */
+static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
+{
+	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
+
+	return ptes_per_table - (i & (ptes_per_table - 1));
+}
+
 static bool selftest_running = false;
 
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
@@ -402,7 +414,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 
 	/* If we can install a leaf entry at this level, then do so */
 	if (size == block_size) {
-		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
+		max_entries = arm_lpae_max_entries(map_idx_start, data);
 		num_entries = min_t(int, pgcount, max_entries);
 		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
 		if (!ret)
@@ -585,66 +597,6 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 	kfree(data);
 }
 
-static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
-				       struct iommu_iotlb_gather *gather,
-				       unsigned long iova, size_t size,
-				       arm_lpae_iopte blk_pte, int lvl,
-				       arm_lpae_iopte *ptep, size_t pgcount)
-{
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;
-	arm_lpae_iopte pte, *tablep;
-	phys_addr_t blk_paddr;
-	size_t tablesz = ARM_LPAE_GRANULE(data);
-	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
-	int i, unmap_idx_start = -1, num_entries = 0, max_entries;
-
-	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
-		return 0;
-
-	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
-	if (!tablep)
-		return 0; /* Bytes unmapped */
-
-	if (size == split_sz) {
-		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
-		max_entries = ptes_per_table - unmap_idx_start;
-		num_entries = min_t(int, pgcount, max_entries);
-	}
-
-	blk_paddr = iopte_to_paddr(blk_pte, data);
-	pte = iopte_prot(blk_pte);
-
-	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
-		/* Unmap! */
-		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
-			continue;
-
-		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
-	}
-
-	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
-	if (pte != blk_pte) {
-		__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
-		/*
-		 * We may race against someone unmapping another part of this
-		 * block, but anything else is invalid. We can't misinterpret
-		 * a page entry here since we're never at the last level.
-		 */
-		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
-			return 0;
-
-		tablep = iopte_deref(pte, data);
-	} else if (unmap_idx_start >= 0) {
-		for (i = 0; i < num_entries; i++)
-			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
-
-		return num_entries * size;
-	}
-
-	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
-}
-
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			       struct iommu_iotlb_gather *gather,
 			       unsigned long iova, size_t size, size_t pgcount,
@@ -666,7 +618,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 	/* If the size matches this level, we're in the right place */
 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
-		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
+		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
 		num_entries = min_t(int, pgcount, max_entries);
 
 		/* Find and handle non-leaf entries */
@@ -694,12 +646,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 		return i * size;
 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
-		/*
-		 * Insert a table at the next level to map the old region,
-		 * minus the part we want to unmap
-		 */
-		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
-						lvl + 1, ptep, pgcount);
+		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
+		return 0;
 	}
 
 	/* Keep on walkin' */
@@ -1362,19 +1310,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		iova += SZ_1G;
 	}
 
-	/* Partial unmap */
-	size = 1UL << __ffs(cfg->pgsize_bitmap);
-	if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
-		return __FAIL(ops, i);
-
-	/* Remap of partial unmap */
-	if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
-			   IOMMU_READ, GFP_KERNEL, &mapped))
-		return __FAIL(ops, i);
-
-	if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
-		return __FAIL(ops, i);
-
 	/* Full unmap */
 	iova = 0;
 	for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
@@ -1397,6 +1332,23 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		iova += SZ_1G;
 	}
 
+	/*
+	 * Map/unmap the last largest supported page of the IAS, this can
+	 * trigger corner cases in the concatednated page tables.
+	 */
+	mapped = 0;
+	size = 1UL << __fls(cfg->pgsize_bitmap);
+	iova = (1UL << cfg->ias) - size;
+	if (ops->map_pages(ops, iova, iova, size, 1,
+			   IOMMU_READ | IOMMU_WRITE |
+			   IOMMU_NOEXEC | IOMMU_CACHE,
+			   GFP_KERNEL, &mapped))
+		return __FAIL(ops, i);
+	if (mapped != size)
+		return __FAIL(ops, i);
+	if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
+		return __FAIL(ops, i);
+
 	free_io_pgtable_ops(ops);
 }
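Editor's note: the arithmetic in the new arm_lpae_max_entries() helper above is easy to check in isolation. Below is a minimal user-space sketch of the same expression, not kernel code; the 512-entries-per-table constant is a hypothetical stand-in for ARM_LPAE_PTES_PER_TABLE(data) (a 4 KiB granule with 8-byte PTEs), and four concatenated PGD pages are assumed for illustration. With a concatenated PGD, ARM_LPAE_PGD_IDX() can return an index past the end of a single table page, so the old "ptes_per_table - idx" computation could go negative; the masked form caps the run of entries at the boundary of the current table page.

#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for ARM_LPAE_PTES_PER_TABLE(data): a 4 KiB
 * granule holds 512 eight-byte PTEs per table page.
 */
#define PTES_PER_TABLE 512

/* Same expression as arm_lpae_max_entries() in the patch above */
static int max_entries(int idx)
{
	return PTES_PER_TABLE - (idx & (PTES_PER_TABLE - 1));
}

int main(void)
{
	/*
	 * With four concatenated PGD pages the top-level index runs
	 * 0..2047. Index 1000 sits in the second page, 24 slots short
	 * of that page's end.
	 */
	assert(max_entries(1000) == 24);	/* 512 - (1000 & 511) */

	/* The old "ptes_per_table - idx" computation went negative here */
	assert(PTES_PER_TABLE - 1000 < 0);

	/* Indices inside the first table page are unchanged by the fix */
	assert(max_entries(100) == 412);

	printf("index arithmetic checks out\n");
	return 0;
}

Note also the behavioral change visible in __arm_lpae_unmap(): with arm_lpae_split_blk_unmap() removed, a request to unmap only part of a large leaf entry no longer splits the block into a next-level table. It now fires a WARN_ONCE and reports 0 bytes unmapped, and the selftest's "Partial unmap" and "Remap of partial unmap" cases were dropped to match, so callers are expected to unmap at the same granularity they mapped.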