author     Will Deacon <will@kernel.org>    2019-07-02 16:44:50 +0100
committer  Will Deacon <will@kernel.org>    2019-07-29 17:22:58 +0100
commit     e953f7f2fa78d1c7fd064171f88457c6b1e21af9 (patch)
tree       b35d7667967cc2988207ace4d920ff5bc55c97b6
parent     abfd6fe0cd535d31ee83b668be6eb59ce6a8469d (diff)
iommu/io-pgtable: Remove unused ->tlb_sync() callback
The ->tlb_sync() callback is no longer used, so it can be removed.

Signed-off-by: Will Deacon <will@kernel.org>
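As context for the diff below: once ->tlb_sync() is gone, the callback table
that io-pgtable users implement looks roughly as follows. This is a sketch
reconstructed from the include/linux/io-pgtable.h hunk at the end of this
patch; the tlb_flush_all prototype is not visible in that hunk and is assumed
from the surrounding kernel code. The key point is that tlb_flush_walk() and
tlb_flush_leaf() must now be fully synchronous, since there is no longer a
separate sync step for the core to call:

struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
};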
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_mmu.c    1
-rw-r--r--   drivers/iommu/arm-smmu-v3.c                8
-rw-r--r--   drivers/iommu/arm-smmu.c                  17
-rw-r--r--   drivers/iommu/io-pgtable-arm-v7s.c         6
-rw-r--r--   drivers/iommu/io-pgtable-arm.c             6
-rw-r--r--   drivers/iommu/ipmmu-vmsa.c                 1
-rw-r--r--   drivers/iommu/msm_iommu.c                 20
-rw-r--r--   drivers/iommu/mtk_iommu.c                  1
-rw-r--r--   drivers/iommu/qcom_iommu.c                 1
-rw-r--r--   include/linux/io-pgtable.h                 9
10 files changed, 16 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index ff9af320cacc..de22a2276e00 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -269,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
.tlb_flush_all = mmu_tlb_inv_context_s1,
.tlb_flush_walk = mmu_tlb_flush_walk,
.tlb_flush_leaf = mmu_tlb_flush_leaf,
- .tlb_sync = mmu_tlb_sync_context,
};
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 98c90a1b4b22..231093413ff9 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1545,13 +1545,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
}
/* IO_PGTABLE API */
-static void arm_smmu_tlb_sync(void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
-
- arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
-}
-
static void arm_smmu_tlb_inv_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
@@ -1634,7 +1627,6 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
.tlb_flush_walk = arm_smmu_tlb_inv_walk,
.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
.tlb_add_page = arm_smmu_tlb_inv_page_nosync,
- .tlb_sync = arm_smmu_tlb_sync,
};
/* IOMMU API */
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f056164a94b0..07a267c437d6 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -251,7 +251,8 @@ enum arm_smmu_domain_stage {
struct arm_smmu_flush_ops {
struct iommu_flush_ops tlb;
void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie)
+ bool leaf, void *cookie);
+ void (*tlb_sync)(void *cookie);
};
struct arm_smmu_domain {
@@ -539,7 +540,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
* On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
* almost negligible, but the benefit of getting the first one in as far ahead
* of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
+ * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
*/
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
@@ -560,7 +561,7 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
ops->tlb_inv_range(iova, size, granule, false, cookie);
- ops->tlb.tlb_sync(cookie);
+ ops->tlb_sync(cookie);
}
static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
@@ -570,7 +571,7 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
ops->tlb_inv_range(iova, size, granule, true, cookie);
- ops->tlb.tlb_sync(cookie);
+ ops->tlb_sync(cookie);
}
static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
@@ -588,9 +589,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
.tlb_flush_walk = arm_smmu_tlb_inv_walk,
.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
.tlb_add_page = arm_smmu_tlb_add_page,
- .tlb_sync = arm_smmu_tlb_sync_context,
},
.tlb_inv_range = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_context,
};
static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
@@ -599,9 +600,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
.tlb_flush_walk = arm_smmu_tlb_inv_walk,
.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
.tlb_add_page = arm_smmu_tlb_add_page,
- .tlb_sync = arm_smmu_tlb_sync_context,
},
.tlb_inv_range = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_context,
};
static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
@@ -610,9 +611,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
.tlb_flush_walk = arm_smmu_tlb_inv_walk,
.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
.tlb_add_page = arm_smmu_tlb_add_page,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
},
.tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_vmid,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -1387,7 +1388,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain);
+ smmu_domain->flush_ops->tlb_sync(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
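Recapping the arm-smmu side of this change (a sketch assembled from the hunks
above, not new code): this driver still needs an explicit invalidate-then-sync
sequence, so the sync callback moves out of the shared struct iommu_flush_ops
and into the driver-private wrapper, and the walk/leaf paths pair the two
steps themselves:

struct arm_smmu_flush_ops {
	struct iommu_flush_ops	tlb;
	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};

static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	/* Issue the invalidation, then wait for it to complete. */
	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb_sync(cookie);
}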
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index b3f975c95f76..203894fb6765 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -813,17 +813,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
dummy_tlb_flush(iova, granule, granule, cookie);
}
-static void dummy_tlb_sync(void *cookie)
-{
- WARN_ON(cookie != cfg_cookie);
-}
-
static const struct iommu_flush_ops dummy_tlb_ops = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
.tlb_add_page = dummy_tlb_add_page,
- .tlb_sync = dummy_tlb_sync,
};
#define __FAIL(ops) ({ \
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a5c0db01533e..f35516744965 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -1080,17 +1080,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
dummy_tlb_flush(iova, granule, granule, cookie);
}
-static void dummy_tlb_sync(void *cookie)
-{
- WARN_ON(cookie != cfg_cookie);
-}
-
static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
.tlb_add_page = dummy_tlb_add_page,
- .tlb_sync = dummy_tlb_sync,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index c4da271af90e..a2b8eff4c1f7 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -371,7 +371,6 @@ static const struct iommu_flush_ops ipmmu_flush_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
.tlb_flush_walk = ipmmu_tlb_flush,
.tlb_flush_leaf = ipmmu_tlb_flush,
- .tlb_sync = ipmmu_tlb_flush_all,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 2cd83295a841..ccfc7ed230ef 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -168,28 +168,16 @@ fail:
return;
}
-static void __flush_iotlb_sync(void *cookie)
-{
- /*
- * Nothing is needed here, the barrier to guarantee
- * completion of the tlb sync operation is implicitly
- * taken care when the iommu client does a writel before
- * kick starting the other master.
- */
-}
-
static void __flush_iotlb_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
__flush_iotlb_range(iova, size, granule, false, cookie);
- __flush_iotlb_sync(cookie);
}
static void __flush_iotlb_leaf(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
__flush_iotlb_range(iova, size, granule, true, cookie);
- __flush_iotlb_sync(cookie);
}
static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie)
@@ -202,7 +190,6 @@ static const struct iommu_flush_ops msm_iommu_flush_ops = {
.tlb_flush_walk = __flush_iotlb_walk,
.tlb_flush_leaf = __flush_iotlb_leaf,
.tlb_add_page = __flush_iotlb_page,
- .tlb_sync = __flush_iotlb_sync,
};
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
@@ -712,6 +699,13 @@ static struct iommu_ops msm_iommu_ops = {
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
+ /*
+ * Nothing is needed here, the barrier to guarantee
+ * completion of the tlb sync operation is implicitly
+ * taken care when the iommu client does a writel before
+ * kick starting the other master.
+ */
+ .iotlb_sync = NULL,
.iova_to_phys = msm_iommu_iova_to_phys,
.add_device = msm_iommu_add_device,
.remove_device = msm_iommu_remove_device,
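The msm conversion relies on the core IOMMU layer treating a NULL
->iotlb_sync as "nothing to do", which is what makes the explicit NULL
assignment above purely documentary. A minimal sketch of that core-side
guard, paraphrased from the include/linux/iommu.h of this era rather than
quoted verbatim:

static inline void iommu_tlb_sync(struct iommu_domain *domain)
{
	/* Drivers with no sync requirement may leave ->iotlb_sync NULL. */
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain);
}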
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index a0b4b4dc4b90..3785750bdb44 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -213,7 +213,6 @@ static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_walk = mtk_iommu_tlb_flush_walk,
.tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
- .tlb_sync = mtk_iommu_tlb_sync,
};
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 7d8411dee4cf..0b8a6d6bb475 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -189,7 +189,6 @@ static const struct iommu_flush_ops qcom_flush_ops = {
.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
.tlb_add_page = qcom_iommu_tlb_add_page,
- .tlb_sync = qcom_iommu_tlb_sync,
};
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 99e04bd2baa1..843310484fe2 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -30,9 +30,6 @@ enum io_pgtable_fmt {
* for IOMMUs that cannot batch TLB invalidation operations
* efficiently and are therefore better suited to issuing them
* early rather than deferring them until iommu_tlb_sync().
- * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
- * any corresponding page table updates are visible to the
- * IOMMU.
*
* Note that these can all be called in atomic context and must therefore
* not block.
@@ -44,7 +41,6 @@ struct iommu_flush_ops {
void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
void *cookie);
void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
- void (*tlb_sync)(void *cookie);
};
/**
@@ -218,11 +214,6 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
}
-static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
-{
- iop->cfg.tlb->tlb_sync(iop->cookie);
-}
-
/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.