author     Linus Torvalds <torvalds@linux-foundation.org>   2021-01-29 13:32:05 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-01-29 13:32:05 -0800
commit     8ef24c2011b77bd6344d16630d3cd95d63de63f8 (patch)
tree       bc3993e0a60ceb6131589b5191897d1308e7288e /drivers/iommu/intel/iommu.c
parent     32b0c410cda19df9f0e88edcae126d0a660cf8b9 (diff)
parent     29b32839725f8c89a41cb6ee054c85f3116ea8b5 (diff)
Merge tag 'iommu-fixes-v5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu fixes from Joerg Roedel:

 - AMD IOMMU fix to make sure features are detected before they are
   queried.

 - Intel IOMMU address alignment check fix for an IOTLB flushing
   command.

 - Performance fix for Intel IOMMU to make sure the code does not do
   full IOTLB flushes all the time. Those flushes are very expensive
   on emulated IOMMUs.

* tag 'iommu-fixes-v5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Do not use flush-queue when caching-mode is on
  iommu/vt-d: Correctly check addr alignment in qi_flush_dev_iotlb_pasid()
  iommu/amd: Use IVHD EFR for early initialization of IOMMU features
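For context on the last item: the DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE attribute that the patch below changes is queried by the common DMA-IOMMU layer when it sets up a DMA domain. The snippet below is a simplified sketch of that consumer side as of v5.11; the helper name iommu_dma_wants_flush_queue is illustrative and not the exact code in drivers/iommu/dma-iommu.c.

#include <linux/iommu.h>

/*
 * Consumer-side sketch (illustrative): the DMA layer asks the IOMMU
 * driver whether deferred, batched IOTLB flushing is acceptable for a
 * DMA domain before it sets up an IOVA flush queue.  After the fix
 * below, the Intel driver answers "no" whenever any active IOMMU
 * reports caching-mode, so virtualized guests stay in strict mode.
 */
static bool iommu_dma_wants_flush_queue(struct iommu_domain *domain)
{
        int attr = 0;

        if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
                                  &attr))
                return false;   /* attribute not supported: stay strict */

        return attr;            /* non-zero means batched flushing is OK */
}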
Diffstat (limited to 'drivers/iommu/intel/iommu.c')
-rw-r--r--   drivers/iommu/intel/iommu.c   32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f665322a0991..06b00b5363d8 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5440,6 +5440,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
         return ret;
 }
 
+static bool domain_use_flush_queue(void)
+{
+        struct dmar_drhd_unit *drhd;
+        struct intel_iommu *iommu;
+        bool r = true;
+
+        if (intel_iommu_strict)
+                return false;
+
+        /*
+         * The flush queue implementation does not perform page-selective
+         * invalidations that are required for efficient TLB flushes in virtual
+         * environments. The benefit of batching is likely to be much lower than
+         * the overhead of synchronizing the virtual and physical IOMMU
+         * page-tables.
+         */
+        rcu_read_lock();
+        for_each_active_iommu(iommu, drhd) {
+                if (!cap_caching_mode(iommu->cap))
+                        continue;
+
+                pr_warn_once("IOMMU batching is disabled due to virtualization");
+                r = false;
+                break;
+        }
+        rcu_read_unlock();
+
+        return r;
+}
+
 static int
 intel_iommu_domain_get_attr(struct iommu_domain *domain,
                             enum iommu_attr attr, void *data)
@@ -5450,7 +5480,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain,
         case IOMMU_DOMAIN_DMA:
                 switch (attr) {
                 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                        *(int *)data = !intel_iommu_strict;
+                        *(int *)data = domain_use_flush_queue();
                         return 0;
                 default:
                         return -ENODEV;
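For reference, the cap_caching_mode() test used by the new domain_use_flush_queue() helper is a single-bit check on the VT-d capability register. The definition below paraphrases include/linux/intel-iommu.h (Caching Mode is bit 7 per the VT-d specification); treat it as a reading aid rather than an exact quote.

/*
 * Caching Mode (CM): real hardware leaves this bit clear; emulated
 * IOMMUs set it so the guest must issue invalidations even for
 * previously not-present entries, which gives the hypervisor a hook to
 * shadow the guest's page tables.  That trap-and-shadow path is also
 * why each IOTLB flush is comparatively expensive there.
 */
#define cap_caching_mode(c)     (((c) >> 7) & 1)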