author    Jason Gunthorpe <jgg@nvidia.com>    2022-04-11 12:16:05 -0300
committer Joerg Roedel <jroedel@suse.de>      2022-04-28 17:24:57 +0200
commit    6043257b1de069fbb5a2a52d7211c0275bc8c0e0 (patch)
tree      37a4d6522421f3f70f5997a668ba557dfa2c8043 /include/linux/iommu.h
parent    5b1553bf18de019388c5e9de03d9330f92507695 (diff)
iommu: Introduce the domain op enforce_cache_coherency()
This new mechanism will replace using IOMMU_CAP_CACHE_COHERENCY and IOMMU_CACHE to control the no-snoop blocking behavior of the IOMMU.

Currently only Intel and AMD IOMMUs are known to support this feature. They both implement it as an IOPTE bit which, when set, causes PCIe TLPs to that IOVA with the no-snoop bit set to be treated as though the no-snoop bit was clear.

The new API is triggered by calling enforce_cache_coherency() before mapping any IOVA to the domain, which globally switches on no-snoop blocking. This allows other implementations that might block no-snoop globally and outside the IOPTE - AMD also documents such a HW capability.

Leave AMD out of sync with Intel and have it block no-snoop even for in-kernel users. This can be trivially resolved in a follow-up patch.

Only VFIO needs to call this API because it does not have detailed control over the device to avoid requesting no-snoop behavior at the device level. Other places using domains with real kernel drivers should simply avoid asking their devices to set the no-snoop bit.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v3-2cf356649677+a32-intel_no_snoop_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
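As a rough sketch of the caller side described above (the helper name and usage below are illustrative only, not the actual VFIO code added later in this series), a user such as VFIO would probe and invoke the op through the domain's iommu_domain_ops before establishing any mappings:

#include <linux/iommu.h>

/*
 * Illustrative helper, not part of this patch: ask the domain to block
 * no-snoop TLPs before any IOVA is mapped.  Returns false if this IOMMU
 * cannot enforce cache coherency for the domain.
 */
static bool example_enforce_coherency(struct iommu_domain *domain)
{
	/* The op is optional; absence means no-snoop cannot be blocked. */
	if (!domain->ops->enforce_cache_coherency)
		return false;

	/* Must be called before mapping any IOVA into the domain. */
	return domain->ops->enforce_cache_coherency(domain);
}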
Diffstat (limited to 'include/linux/iommu.h')
-rw-r--r--  include/linux/iommu.h | 4
1 file changed, 4 insertions, 0 deletions
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 4123693ae319..c7ad6b10e261 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -274,6 +274,9 @@ struct iommu_ops {
  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
  * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ *                           including no-snoop TLPs on PCIe or other platform
+ *                           specific mechanisms.
  * @enable_nesting: Enable nesting
  * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
  * @free: Release the domain after use.
@@ -302,6 +305,7 @@ struct iommu_domain_ops {
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    dma_addr_t iova);
 
+	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
 	int (*enable_nesting)(struct iommu_domain *domain);
 	int (*set_pgtable_quirks)(struct iommu_domain *domain,
 				  unsigned long quirks);
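
For the provider side, a minimal sketch of how an IOMMU driver could wire up the new op (the my_iommu_* names and the capability check are hypothetical stand-ins; the real Intel and AMD drivers implement this via an IOPTE bit as the commit message explains):

#include <linux/container_of.h>
#include <linux/iommu.h>

struct my_iommu_domain {
	struct iommu_domain domain;
	bool force_snooping;		/* latched by enforce_cache_coherency() */
};

/* Stand-in for a real hardware capability check. */
static bool my_hw_can_force_snoop(void)
{
	return true;
}

static bool my_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
	struct my_iommu_domain *md =
		container_of(domain, struct my_iommu_domain, domain);

	/* Only claim support if the hardware can override no-snoop TLPs. */
	if (!my_hw_can_force_snoop())
		return false;

	/* Subsequent map() calls would set the force-snoop bit in each IOPTE. */
	md->force_snooping = true;
	return true;
}

static const struct iommu_domain_ops my_iommu_domain_ops = {
	/* .attach_dev, .map, .unmap, .iova_to_phys, ... */
	.enforce_cache_coherency = my_iommu_enforce_cache_coherency,
};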