Diffstat (limited to 'drivers/iommu/mtk_iommu_v1.c')
-rw-r--r--	drivers/iommu/mtk_iommu_v1.c	404
1 file changed, 240 insertions(+), 164 deletions(-)
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 778e66f5f1aa..c8d8eff5373d 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -7,13 +7,11 @@
*
* Based on driver/iommu/mtk_iommu.c
*/
-#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -27,12 +25,21 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
-#include <asm/dma-iommu.h>
-#include <linux/init.h>
+#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
-#include "mtk_iommu.h"
+
+#if defined(CONFIG_ARM)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) NULL
+#define arm_iommu_attach_device(...) -ENODEV
+struct dma_iommu_mapping {
+ struct iommu_domain *domain;
+};
+#endif
#define REG_MMU_PT_BASE_ADDR 0x000
@@ -81,6 +88,7 @@
/* MTK generation one iommu HW only support 4K size mapping */
#define MT2701_IOMMU_PAGE_SHIFT 12
#define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT)
+#define MT2701_LARB_NR_MAX 3
/*
* MTK m4u support 4GB iova address space, and only support 4K page
@@ -88,17 +96,53 @@
*/
#define M2701_IOMMU_PGT_SIZE SZ_4M
-struct mtk_iommu_domain {
+struct mtk_iommu_v1_suspend_reg {
+ u32 standard_axi_mode;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 int_control0;
+};
+
+struct mtk_iommu_v1_data {
+ void __iomem *base;
+ int irq;
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_v1_domain *m4u_dom;
+
+ struct iommu_device iommu;
+ struct dma_iommu_mapping *mapping;
+ struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
+
+ struct mtk_iommu_v1_suspend_reg reg;
+};
+
+struct mtk_iommu_v1_domain {
spinlock_t pgtlock; /* lock for page table */
struct iommu_domain domain;
u32 *pgt_va;
dma_addr_t pgt_pa;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
};
-static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+static int mtk_iommu_v1_bind(struct device *dev)
+{
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->larb_imu);
+}
+
+static void mtk_iommu_v1_unbind(struct device *dev)
{
- return container_of(dom, struct mtk_iommu_domain, domain);
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->larb_imu);
+}
+
+static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct mtk_iommu_v1_domain, domain);
}
static const int mt2701_m4u_in_larb[] = {
@@ -124,7 +168,7 @@ static inline int mt2701_m4u_to_port(int id)
return id - mt2701_m4u_in_larb[larb];
}
-static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
+static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
@@ -132,8 +176,8 @@ static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
wmb(); /* Make sure the tlb flush all done */
}
-static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
- unsigned long iova, size_t size)
+static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
+ unsigned long iova, size_t size)
{
int ret;
u32 tmp;
@@ -151,16 +195,16 @@ static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
- mtk_iommu_tlb_flush_all(data);
+ mtk_iommu_v1_tlb_flush_all(data);
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}
-static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
- struct mtk_iommu_data *data = dev_id;
- struct mtk_iommu_domain *dom = data->m4u_dom;
+ struct mtk_iommu_v1_data *data = dev_id;
+ struct mtk_iommu_v1_domain *dom = data->m4u_dom;
u32 int_state, regval, fault_iova, fault_pa;
unsigned int fault_larb, fault_port;
@@ -190,13 +234,13 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
regval |= F_INT_CLR_BIT;
writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
- mtk_iommu_tlb_flush_all(data);
+ mtk_iommu_v1_tlb_flush_all(data);
return IRQ_HANDLED;
}
-static void mtk_iommu_config(struct mtk_iommu_data *data,
- struct device *dev, bool enable)
+static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
+ struct device *dev, bool enable)
{
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
@@ -209,7 +253,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
larb_mmu = &data->larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
- enable ? "enable" : "disable", portid);
+ str_enable_disable(enable), portid);
if (enable)
larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
@@ -218,9 +262,9 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
}
}
-static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
- struct mtk_iommu_domain *dom = data->m4u_dom;
+ struct mtk_iommu_v1_domain *dom = data->m4u_dom;
spin_lock_init(&dom->pgtlock);
@@ -236,35 +280,35 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
return 0;
}
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
{
- struct mtk_iommu_domain *dom;
-
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
+ struct mtk_iommu_v1_domain *dom;
dom = kzalloc(sizeof(*dom), GFP_KERNEL);
if (!dom)
return NULL;
+ dom->domain.pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE;
+
return &dom->domain;
}
-static void mtk_iommu_domain_free(struct iommu_domain *domain)
+static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = dom->data;
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_data *data = dom->data;
dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
dom->pgt_va, dom->pgt_pa);
kfree(to_mtk_domain(domain));
}
-static int mtk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+static int mtk_iommu_v1_attach_device(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
struct dma_iommu_mapping *mtk_mapping;
int ret;
@@ -275,76 +319,83 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
if (!data->m4u_dom) {
data->m4u_dom = dom;
- ret = mtk_iommu_domain_finalise(data);
+ ret = mtk_iommu_v1_domain_finalise(data);
if (ret) {
data->m4u_dom = NULL;
return ret;
}
}
- mtk_iommu_config(data, dev, true);
+ mtk_iommu_v1_config(data, dev, true);
return 0;
}
-static void mtk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+ struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
- mtk_iommu_config(data, dev, false);
+ mtk_iommu_v1_config(data, dev, false);
+ return 0;
}
-static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static struct iommu_domain_ops mtk_iommu_v1_identity_ops = {
+ .attach_dev = mtk_iommu_v1_identity_attach,
+};
+
+static struct iommu_domain mtk_iommu_v1_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &mtk_iommu_v1_identity_ops,
+};
+
+static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned long flags;
unsigned int i;
u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
u32 pabase = (u32)paddr;
- int map_size = 0;
spin_lock_irqsave(&dom->pgtlock, flags);
- for (i = 0; i < page_num; i++) {
- if (pgt_base_iova[i]) {
- memset(pgt_base_iova, 0, i * sizeof(u32));
+ for (i = 0; i < pgcount; i++) {
+ if (pgt_base_iova[i])
break;
- }
pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
pabase += MT2701_IOMMU_PAGE_SIZE;
- map_size += MT2701_IOMMU_PAGE_SIZE;
}
spin_unlock_irqrestore(&dom->pgtlock, flags);
- mtk_iommu_tlb_flush_range(dom->data, iova, size);
+ *mapped = i * MT2701_IOMMU_PAGE_SIZE;
+ mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped);
- return map_size == size ? 0 : -EEXIST;
+ return i == pgcount ? 0 : -EEXIST;
}
-static size_t mtk_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- struct iommu_iotlb_gather *gather)
+static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned long flags;
u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
- unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+ size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE;
spin_lock_irqsave(&dom->pgtlock, flags);
- memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ memset(pgt_base_iova, 0, pgcount * sizeof(u32));
spin_unlock_irqrestore(&dom->pgtlock, flags);
- mtk_iommu_tlb_flush_range(dom->data, iova, size);
+ mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
return size;
}
-static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
- struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned long flags;
phys_addr_t pa;
@@ -356,17 +407,16 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
-static const struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_v1_ops;
/*
* MTK generation one iommu HW only support one iommu domain, and all the client
* sharing the same iova address space.
*/
-static int mtk_iommu_create_mapping(struct device *dev,
- struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev,
+ const struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
+ struct mtk_iommu_v1_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
int ret;
@@ -377,14 +427,9 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
}
- if (!fwspec) {
- ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
- if (ret)
- return ret;
- fwspec = dev_iommu_fwspec_get(dev);
- } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) {
- return -EINVAL;
- }
+ ret = iommu_fwspec_init(dev, of_fwnode_handle(args->np));
+ if (ret)
+ return ret;
if (!dev_iommu_priv_get(dev)) {
/* Get the m4u device */
@@ -393,6 +438,8 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
+
+ put_device(&m4updev->dev);
}
ret = iommu_fwspec_add_ids(dev, args->args, 1);
@@ -403,8 +450,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
mtk_mapping = data->mapping;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
- mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
- 0, 1ULL << 32);
+ mtk_mapping = arm_iommu_create_mapping(dev, 0, 1ULL << 32);
if (IS_ERR(mtk_mapping))
return PTR_ERR(mtk_mapping);
@@ -414,23 +460,20 @@ static int mtk_iommu_create_mapping(struct device *dev,
return 0;
}
-static int mtk_iommu_def_domain_type(struct device *dev)
-{
- return IOMMU_DOMAIN_UNMANAGED;
-}
-
-static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
+static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct iommu_fwspec *fwspec = NULL;
struct of_phandle_args iommu_spec;
- struct mtk_iommu_data *data;
- int err, idx = 0;
+ struct mtk_iommu_v1_data *data;
+ int err, idx = 0, larbid, larbidx;
+ struct device_link *link;
+ struct device *larbdev;
while (!of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells",
idx, &iommu_spec)) {
- err = mtk_iommu_create_mapping(dev, &iommu_spec);
+ err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
of_node_put(iommu_spec.np);
if (err)
return ERR_PTR(err);
@@ -440,39 +483,61 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
idx++;
}
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return ERR_PTR(-ENODEV); /* Not a iommu client device */
+ if (!fwspec)
+ return ERR_PTR(-ENODEV);
data = dev_iommu_priv_get(dev);
+ /* Link the consumer device with the smi-larb device(supplier) */
+ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ if (larbid >= MT2701_LARB_NR_MAX)
+ return ERR_PTR(-EINVAL);
+
+ for (idx = 1; idx < fwspec->num_ids; idx++) {
+ larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
+ if (larbid != larbidx) {
+ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
+ larbid, larbidx);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ larbdev = data->larb_imu[larbid].dev;
+ if (!larbdev)
+ return ERR_PTR(-EINVAL);
+
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link)
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
+
return &data->iommu;
}
-static void mtk_iommu_probe_finalize(struct device *dev)
+static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
- struct dma_iommu_mapping *mtk_mapping;
- struct mtk_iommu_data *data;
+ __maybe_unused struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
int err;
- data = dev_iommu_priv_get(dev);
- mtk_mapping = data->mapping;
-
- err = arm_iommu_attach_device(dev, mtk_mapping);
+ err = arm_iommu_attach_device(dev, data->mapping);
if (err)
dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void mtk_iommu_release_device(struct device *dev)
+static void mtk_iommu_v1_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct mtk_iommu_v1_data *data;
+ struct device *larbdev;
+ unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return;
-
- iommu_fwspec_free(dev);
+ data = dev_iommu_priv_get(dev);
+ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
}
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
u32 regval;
int ret;
@@ -502,7 +567,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);
- if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
dev_name(data->dev), (void *)data)) {
writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
clk_disable_unprepare(data->bclk);
@@ -513,37 +578,38 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return 0;
}
-static const struct iommu_ops mtk_iommu_ops = {
- .domain_alloc = mtk_iommu_domain_alloc,
- .domain_free = mtk_iommu_domain_free,
- .attach_dev = mtk_iommu_attach_device,
- .detach_dev = mtk_iommu_detach_device,
- .map = mtk_iommu_map,
- .unmap = mtk_iommu_unmap,
- .iova_to_phys = mtk_iommu_iova_to_phys,
- .probe_device = mtk_iommu_probe_device,
- .probe_finalize = mtk_iommu_probe_finalize,
- .release_device = mtk_iommu_release_device,
- .def_domain_type = mtk_iommu_def_domain_type,
+static const struct iommu_ops mtk_iommu_v1_ops = {
+ .identity_domain = &mtk_iommu_v1_identity_domain,
+ .domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
+ .probe_device = mtk_iommu_v1_probe_device,
+ .probe_finalize = mtk_iommu_v1_probe_finalize,
+ .release_device = mtk_iommu_v1_release_device,
.device_group = generic_device_group,
- .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = mtk_iommu_v1_attach_device,
+ .map_pages = mtk_iommu_v1_map,
+ .unmap_pages = mtk_iommu_v1_unmap,
+ .iova_to_phys = mtk_iommu_v1_iova_to_phys,
+ .free = mtk_iommu_v1_domain_free,
+ }
};
-static const struct of_device_id mtk_iommu_of_ids[] = {
+static const struct of_device_id mtk_iommu_v1_of_ids[] = {
{ .compatible = "mediatek,mt2701-m4u", },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
-static const struct component_master_ops mtk_iommu_com_ops = {
- .bind = mtk_iommu_bind,
- .unbind = mtk_iommu_unbind,
+static const struct component_master_ops mtk_iommu_v1_com_ops = {
+ .bind = mtk_iommu_v1_bind,
+ .unbind = mtk_iommu_v1_unbind,
};
-static int mtk_iommu_probe(struct platform_device *pdev)
+static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
- struct mtk_iommu_data *data;
struct device *dev = &pdev->dev;
+ struct mtk_iommu_v1_data *data;
struct resource *res;
struct component_match *match = NULL;
void *protect;
@@ -556,8 +622,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->dev = dev;
/* Protect memory. HW will access here while translation fault.*/
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
- GFP_KERNEL | GFP_DMA);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN,
+ GFP_KERNEL | GFP_DMA);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
@@ -580,13 +646,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (larb_nr < 0)
return larb_nr;
+ if (larb_nr > MTK_LARB_NR_MAX)
+ return -EINVAL;
+
for (i = 0; i < larb_nr; i++) {
struct device_node *larbnode;
struct platform_device *plarbdev;
larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
- if (!larbnode)
- return -EINVAL;
+ if (!larbnode) {
+ ret = -EINVAL;
+ goto out_put_larbs;
+ }
if (!of_device_is_available(larbnode)) {
of_node_put(larbnode);
@@ -596,69 +667,74 @@ static int mtk_iommu_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
- return -EPROBE_DEFER;
+ ret = -ENODEV;
+ goto out_put_larbs;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
+ put_device(&plarbdev->dev);
+ ret = -EPROBE_DEFER;
+ goto out_put_larbs;
}
data->larb_imu[i].dev = &plarbdev->dev;
- component_match_add_release(dev, &match, release_of,
- compare_of, larbnode);
+ component_match_add_release(dev, &match, component_release_of,
+ component_compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
- ret = mtk_iommu_hw_init(data);
+ ret = mtk_iommu_v1_hw_init(data);
if (ret)
- return ret;
+ goto out_put_larbs;
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(&pdev->dev));
if (ret)
- return ret;
+ goto out_clk_unprepare;
- ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
if (ret)
goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
- if (ret)
- goto out_dev_unreg;
- }
-
- ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
if (ret)
- goto out_bus_set_null;
+ goto out_dev_unreg;
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_dev_unreg:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
+out_clk_unprepare:
+ clk_disable_unprepare(data->bclk);
+out_put_larbs:
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
+ put_device(data->larb_imu[i].dev);
+
return ret;
}
-static int mtk_iommu_remove(struct platform_device *pdev)
+static void mtk_iommu_v1_remove(struct platform_device *pdev)
{
- struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);
+ int i;
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
-
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
- component_master_del(&pdev->dev, &mtk_iommu_com_ops);
- return 0;
+ component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
+
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
+ put_device(data->larb_imu[i].dev);
}
-static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
- struct mtk_iommu_suspend_reg *reg = &data->reg;
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
reg->standard_axi_mode = readl_relaxed(base +
@@ -669,10 +745,10 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_iommu_resume(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
- struct mtk_iommu_suspend_reg *reg = &data->reg;
+ struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
@@ -685,20 +761,20 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops mtk_iommu_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};
-static struct platform_driver mtk_iommu_driver = {
- .probe = mtk_iommu_probe,
- .remove = mtk_iommu_remove,
+static struct platform_driver mtk_iommu_v1_driver = {
+ .probe = mtk_iommu_v1_probe,
+ .remove = mtk_iommu_v1_remove,
.driver = {
.name = "mtk-iommu-v1",
- .of_match_table = mtk_iommu_of_ids,
- .pm = &mtk_iommu_pm_ops,
+ .of_match_table = mtk_iommu_v1_of_ids,
+ .pm = &mtk_iommu_v1_pm_ops,
}
};
-module_platform_driver(mtk_iommu_driver);
+module_platform_driver(mtk_iommu_v1_driver);
MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");