author    Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>  2023-05-30 10:11:36 -0400
committer Joerg Roedel <jroedel@suse.de>  2023-06-09 14:47:10 +0200
commit    98aeb4ea5599c5f7fbb1645bdd2050d0be96dfa3 (patch)
tree      16ca996769a61936934d3408baaad77a69688928 /drivers/iommu
parent    66419036f68a838c00cbccacd6cb2e99da6e5710 (diff)
iommu/amd: Do not Invalidate IRT when IRTE caching is disabled
With the Interrupt Remapping Table cache disabled, there is no need to
issue an invalidate IRT command and wait for its completion. Therefore,
add logic to bypass the operation.

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Suggested-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20230530141137.14376-5-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
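For readers outside the kernel tree, the following is a minimal, self-contained
sketch of the pattern this patch introduces: a single helper that performs the
IRT flush and the completion wait, and returns early when IRTE caching is
disabled. The struct fake_iommu type and the stubbed flush_irt()/completion_wait()
functions are illustrative assumptions only; the real driver operates on
struct amd_iommu and uses iommu_flush_irt() and iommu_completion_wait().

/*
 * Illustrative sketch only -- not the kernel code. Types and stubs below
 * are simplified assumptions; see the diff for the real helper.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_iommu {
	bool irtcachedis_enabled;	/* IRTE caching disabled on this IOMMU? */
};

static void flush_irt(struct fake_iommu *iommu, uint16_t devid)
{
	(void)iommu;
	printf("flush IRT for devid 0x%04x\n", devid);
}

static void completion_wait(struct fake_iommu *iommu)
{
	(void)iommu;
	printf("wait for completion\n");
}

/* Skip both the flush and the wait when the IRTE cache is disabled. */
static void flush_irt_and_complete(struct fake_iommu *iommu, uint16_t devid)
{
	if (iommu->irtcachedis_enabled)
		return;

	flush_irt(iommu, devid);
	completion_wait(iommu);
}

int main(void)
{
	struct fake_iommu iommu = { .irtcachedis_enabled = true };

	flush_irt_and_complete(&iommu, 0x10);	/* no-op: cache disabled */

	iommu.irtcachedis_enabled = false;
	flush_irt_and_complete(&iommu, 0x10);	/* flush + wait */
	return 0;
}

Consolidating the flush/wait pair behind one helper also keeps the
irtcachedis check in a single place instead of open-coding it at every
call site, which is what the hunks below do for the three callers.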
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd/iommu.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index e6daf0f39fd8..8fdd6ebf8711 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1266,12 +1266,24 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
 	u32 devid;
 	u16 last_bdf = iommu->pci_seg->last_bdf;
 
+	if (iommu->irtcachedis_enabled)
+		return;
+
 	for (devid = 0; devid <= last_bdf; devid++)
 		iommu_flush_irt(iommu, devid);
 
 	iommu_completion_wait(iommu);
 }
 
+static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
+{
+	if (iommu->irtcachedis_enabled)
+		return;
+
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+}
+
 void iommu_flush_all_caches(struct amd_iommu *iommu)
 {
 	if (iommu_feature(iommu, FEATURE_IA)) {
@@ -3030,8 +3042,7 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
 
 	raw_spin_unlock_irqrestore(&table->lock, flags);
 
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);
 
 	return 0;
 }
@@ -3050,8 +3061,7 @@ static int modify_irte(struct amd_iommu *iommu,
 	table->table[index] = irte->val;
 	raw_spin_unlock_irqrestore(&table->lock, flags);
 
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);
 
 	return 0;
 }
@@ -3069,8 +3079,7 @@ static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
 	iommu->irte_ops->clear_allocated(table, index);
 	raw_spin_unlock_irqrestore(&table->lock, flags);
 
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);
 }
 
 static void irte_prepare(void *entry,