author	Vasant Hegde <vasant.hegde@amd.com>	2023-11-22 09:02:11 +0000
committer	Joerg Roedel <jroedel@suse.de>	2023-12-11 15:25:35 +0100
commit	4f0a600799237ed95b403f24354305b0f81ccbb4 (patch)
tree	3dc60a827a097b8f16a226d0e5cb3a56bd1a71b3 /drivers/iommu
parent	cf62924daf9f0363c82e4332d9ac9630f1d76c42 (diff)
iommu/amd: Refactor IOMMU tlb invalidation code
build_inv_iommu_pages() and build_inv_iommu_pasid() largely duplicate each other. Hence enhance build_inv_iommu_pages() to invalidate guest pages as well, and remove build_inv_iommu_pasid().

Suggested-by: Kishon Vijay Abraham I <kvijayab@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20231122090215.6191-6-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
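For reference, this is roughly how the consolidated helper reads after the patch, reconstructed from the hunks below. It is a sketch, not a standalone snippet: it relies on build_inv_address(), lower_32_bits()/upper_32_bits() and the CMD_* definitions that already exist in the AMD IOMMU driver.

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid,
				  ioasid_t pasid, bool gn)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));

	cmd->data[1] |= domid;
	cmd->data[2] = lower_32_bits(inv_address);
	cmd->data[3] = upper_32_bits(inv_address);
	/* PDE bit - we want to flush everything, not only the PTEs */
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	if (gn) {
		/* GN set: qualify the invalidation by the given PASID */
		cmd->data[0] |= pasid;
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

Ordinary domain flush paths now call it with IOMMU_NO_PASID and gn = false, while __flush_pasid() passes the real PASID with gn = true, which is what previously required the separate build_inv_iommu_pasid() helper.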
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd/iommu.c	36
1 file changed, 14 insertions, 22 deletions
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b26db4f0b650..8c09e8f63913 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1124,17 +1124,23 @@ static inline u64 build_inv_address(u64 address, size_t size)
}
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid)
+ size_t size, u16 domid,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[1] |= domid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
- CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
/* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ if (gn) {
+ cmd->data[0] |= pasid;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
@@ -1151,22 +1157,6 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
- u64 address, size_t size)
-{
- u64 inv_address = build_inv_address(address, size);
-
- memset(cmd, 0, sizeof(*cmd));
-
- cmd->data[0] = pasid;
- cmd->data[1] = domid;
- cmd->data[2] = lower_32_bits(inv_address);
- cmd->data[3] = upper_32_bits(inv_address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-}
-
static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
int qdep, u64 address, size_t size)
{
@@ -1337,7 +1327,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
}
@@ -1348,7 +1338,8 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{
struct iommu_cmd cmd;
- build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, dom_id);
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
iommu_completion_wait(iommu);
@@ -1477,7 +1468,8 @@ static void __domain_flush_pages(struct protection_domain *domain,
struct iommu_cmd cmd;
int ret = 0, i;
- build_inv_iommu_pages(&cmd, address, size, domain->id);
+ build_inv_iommu_pages(&cmd, address, size, domain->id,
+ IOMMU_NO_PASID, false);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i])
@@ -2661,7 +2653,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
if (!(domain->flags & PD_IOMMUV2_MASK))
return -EINVAL;
- build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
/*
* IOMMU TLB needs to be flushed before Device TLB to