-rw-r--r--   drivers/iommu/intel/iommu.c   47
1 file changed, 22 insertions, 25 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 940d1c6c2248..d2451a8ee6e7 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3643,44 +3643,41 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
 	return support;
 }
 
-static void domain_set_force_snooping(struct dmar_domain *domain)
+static bool intel_iommu_enforce_cache_coherency_fs(struct iommu_domain *domain)
 {
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct device_domain_info *info;
 
-	assert_spin_locked(&domain->lock);
-	/*
-	 * Second level page table supports per-PTE snoop control. The
-	 * iommu_map() interface will handle this by setting SNP bit.
-	 */
-	if (!domain->use_first_level) {
-		domain->set_pte_snp = true;
-		return;
-	}
+	guard(spinlock_irqsave)(&dmar_domain->lock);
+
+	if (dmar_domain->force_snooping)
+		return true;
 
-	list_for_each_entry(info, &domain->devices, link)
+	if (!domain_support_force_snooping(dmar_domain))
+		return false;
+
+	dmar_domain->force_snooping = true;
+	list_for_each_entry(info, &dmar_domain->devices, link)
 		intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
 						     IOMMU_NO_PASID);
+	return true;
 }
 
-static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+static bool intel_iommu_enforce_cache_coherency_ss(struct iommu_domain *domain)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long flags;
 
-	if (dmar_domain->force_snooping)
-		return true;
-
-	spin_lock_irqsave(&dmar_domain->lock, flags);
+	guard(spinlock_irqsave)(&dmar_domain->lock);
 	if (!domain_support_force_snooping(dmar_domain) ||
-	    (!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
-		spin_unlock_irqrestore(&dmar_domain->lock, flags);
+	    dmar_domain->has_mappings)
 		return false;
-	}
 
-	domain_set_force_snooping(dmar_domain);
+	/*
+	 * Second level page table supports per-PTE snoop control. The
+	 * iommu_map() interface will handle this by setting SNP bit.
+	 */
+	dmar_domain->set_pte_snp = true;
 	dmar_domain->force_snooping = true;
-	spin_unlock_irqrestore(&dmar_domain->lock, flags);
-
 	return true;
 }
 
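Aside on the locking change in this hunk: guard(spinlock_irqsave)() is the scoped-lock helper from <linux/cleanup.h>/<linux/spinlock.h>; it releases the lock automatically when the enclosing scope exits, which is why neither new function needs an explicit spin_unlock_irqrestore() before its returns. A minimal sketch of the pattern, using a hypothetical my_domain/my_enforce rather than the driver's types:

	#include <linux/spinlock.h>

	/*
	 * Sketch of the guard(spinlock_irqsave) pattern: the lock is taken
	 * when the guard is declared and dropped at every return below.
	 */
	struct my_domain {
		spinlock_t lock;
		bool force_snooping;
	};

	static bool my_enforce(struct my_domain *d)
	{
		guard(spinlock_irqsave)(&d->lock);

		if (d->force_snooping)
			return true;	/* no explicit unlock needed */

		d->force_snooping = true;
		return true;		/* unlocked here as well */
	}
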
@@ -4398,7 +4395,7 @@ const struct iommu_domain_ops intel_fs_paging_domain_ops = {
 	.iotlb_sync = intel_iommu_tlb_sync,
 	.iova_to_phys = intel_iommu_iova_to_phys,
 	.free = intel_iommu_domain_free,
-	.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+	.enforce_cache_coherency = intel_iommu_enforce_cache_coherency_fs,
 };
 
 const struct iommu_domain_ops intel_ss_paging_domain_ops = {
@@ -4411,7 +4408,7 @@ const struct iommu_domain_ops intel_ss_paging_domain_ops = {
 	.iotlb_sync = intel_iommu_tlb_sync,
 	.iova_to_phys = intel_iommu_iova_to_phys,
 	.free = intel_iommu_domain_free,
-	.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+	.enforce_cache_coherency = intel_iommu_enforce_cache_coherency_ss,
 };
 
 const struct iommu_ops intel_iommu_ops = {
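
The split mirrors how the two page-table formats enforce coherency: the first-stage (_fs) path programs snoop control per device PASID entry, so it can be enabled even after mappings exist, while the second-stage (_ss) path relies on the SNP bit carried in each PTE, which is why it bails out once has_mappings is set. A rough, simplified sketch of the per-PTE idea (not the driver's actual map path; the bit position is assumed to be the VT-d second-stage SNP bit):

	#include <linux/bits.h>
	#include <linux/types.h>

	#define MY_PTE_SNP	BIT_ULL(11)	/* assumed SNP bit in second-stage PTEs */

	/*
	 * Simplified illustration: once set_pte_snp is true, every PTE written
	 * by the map path carries the snoop-control bit, forcing coherent DMA.
	 */
	static u64 make_pte(u64 paddr, u64 prot, bool set_pte_snp)
	{
		u64 pte = paddr | prot;

		if (set_pte_snp)
			pte |= MY_PTE_SNP;
		return pte;
	}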