Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')
-rw-r--r--  drivers/iommu/rockchip-iommu.c  438
1 file changed, 259 insertions, 179 deletions
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c9ba9f377f63..85f3667e797c 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1,19 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* IOMMU API for Rockchip
*
* Module Authors: Simon Xue <xxm@rock-chips.com>
* Daniel Kurtz <djkurtz@chromium.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -24,12 +20,14 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
+
+#include "iommu-pages.h"
/** MMU register offsets */
#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
@@ -90,6 +88,7 @@ struct rk_iommu_domain {
dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
+ struct device *dma_dev;
struct iommu_domain domain;
};
@@ -99,17 +98,25 @@ static const char * const rk_iommu_clocks[] = {
"aclk", "iface",
};
+struct rk_iommu_ops {
+ phys_addr_t (*pt_address)(u32 dte);
+ u32 (*mk_dtentries)(dma_addr_t pt_dma);
+ u32 (*mk_ptentries)(phys_addr_t page, int prot);
+ u64 dma_bit_mask;
+ gfp_t gfp_flags;
+};
+
struct rk_iommu {
struct device *dev;
void __iomem **bases;
int num_mmu;
+ int num_irq;
struct clk_bulk_data *clocks;
int num_clocks;
bool reset_disabled;
struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
- struct iommu_group *group;
};
struct rk_iommudata {
@@ -117,14 +124,15 @@ struct rk_iommudata {
struct rk_iommu *iommu;
};
-static struct device *dma_dev;
+static const struct rk_iommu_ops *rk_ops;
+static struct iommu_domain rk_identity_domain;
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
unsigned int count)
{
size_t size = count * sizeof(u32); /* count of u32 entry */
- dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -181,6 +189,33 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}
+/*
+ * In v2:
+ * 31:12 - PT address bit 31:0
+ * 11: 8 - PT address bit 35:32
+ * 7: 4 - PT address bit 39:36
+ * 3: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
+#define DTE_HI_MASK1 GENMASK(11, 8)
+#define DTE_HI_MASK2 GENMASK(7, 4)
+#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
+#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
+#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32)
+#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
+
+static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
+{
+ u64 dte_v2 = dte;
+
+ dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
+ ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
+ (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
+
+ return (phys_addr_t)dte_v2;
+}
+
static inline bool rk_dte_is_pt_valid(u32 dte)
{
return dte & RK_DTE_PT_VALID;
@@ -191,6 +226,15 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
+static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
+{
+ pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
+ ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
+ (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;
+
+ return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
+}
+
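/*
 * Illustrative only, not part of this patch: a minimal stand-alone sketch
 * that mirrors rk_mk_dte_v2()/rk_dte_pt_address_v2() in user space so the
 * v2 bit packing above can be sanity-checked.  The helper names and the
 * mask literals below are local to this sketch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PT_ADDR_MASK	0xfffff000u		/* DTE bits 31:12 */
#define DTE_HI_MASK1	0x00000f00u		/* DTE bits 11:8  */
#define DTE_HI_MASK2	0x000000f0u		/* DTE bits  7:4  */
#define PA_HI_MASK1	0x0000000f00000000ull	/* PT address bits 35:32 */
#define PA_HI_MASK2	0x000000f000000000ull	/* PT address bits 39:36 */
#define DTE_VALID	0x1u

/* Pack a 40-bit, 4 KiB aligned page-table address into a 32-bit v2 DTE. */
static uint32_t mk_dte_v2(uint64_t pt_dma)
{
	return (uint32_t)((pt_dma & PT_ADDR_MASK) |
			  ((pt_dma & PA_HI_MASK1) >> 24) |
			  ((pt_dma & PA_HI_MASK2) >> 32)) | DTE_VALID;
}

/* Recover the 40-bit page-table address from a v2 DTE. */
static uint64_t dte_pt_address_v2(uint32_t dte)
{
	uint64_t d = dte;

	return ((d & DTE_HI_MASK2) << 32) |
	       ((d & DTE_HI_MASK1) << 24) |
	       (d & PT_ADDR_MASK);
}

int main(void)
{
	uint64_t pt_dma = 0x123456000ull;	/* example 40-bit address */
	uint32_t dte = mk_dte_v2(pt_dma);

	printf("dte = %#x\n", (unsigned int)dte);	/* prints 0x23456101 */
	assert(dte_pt_address_v2(dte) == pt_dma);
	return 0;
}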
/*
* Each PTE has a Page address, some flags and a valid bit:
* +---------------------+---+-------+-+
@@ -217,11 +261,6 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
#define RK_PTE_PAGE_READABLE BIT(1)
#define RK_PTE_PAGE_VALID BIT(0)
-static inline phys_addr_t rk_pte_page_address(u32 pte)
-{
- return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
-}
-
static inline bool rk_pte_is_page_valid(u32 pte)
{
return pte & RK_PTE_PAGE_VALID;
@@ -237,6 +276,27 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
return page | flags | RK_PTE_PAGE_VALID;
}
+/*
+ * In v2:
+ * 31:12 - Page address bit 31:0
+ * 11: 8 - Page address bit 35:32
+ * 7: 4 - Page address bit 39:36
+ * 3 - Security
+ * 2 - Writable
+ * 1 - Readable
+ * 0 - 1 if Page @ Page address is valid
+ */
+
+static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+
+ return rk_mk_dte_v2(page) | flags;
+}
+
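/*
 * Illustrative only, not part of this patch: with the v2 layout above, a
 * read-write mapping of the 40-bit page address 0x1_2345_6000 packs as
 *
 *   bits 31:12 = 0x23456   (page address bits 31:12)
 *   bits 11:8  = 0x1       (page address bits 35:32)
 *   bits  7:4  = 0x0       (page address bits 39:36)
 *   bit 2 = 1 (writable), bit 1 = 1 (readable), bit 0 = 1 (valid)
 *
 * i.e. a PTE of 0x23456107.  rk_mk_pte_v2() produces exactly this by
 * reusing rk_mk_dte_v2() for the address bits and OR-ing in the flag bits.
 */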
static u32 rk_mk_pte_invalid(u32 pte)
{
return pte & ~RK_PTE_PAGE_VALID;
@@ -447,13 +507,13 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
/*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
- * and verifying that upper 5 nybbles are read back.
+ * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
*/
for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+ dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
- dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
- if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
return -EFAULT;
}
@@ -491,7 +551,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
page_offset = rk_iova_page_offset(iova);
mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
- mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+ mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
@@ -500,14 +560,14 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
if (!rk_dte_is_pt_valid(dte))
goto print_it;
- pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
pte_addr = phys_to_virt(pte_addr_phys);
pte = *pte_addr;
if (!rk_pte_is_page_valid(pte))
goto print_it;
- page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_addr_phys = rk_ops->pt_address(pte) + page_offset;
page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
print_it:
@@ -529,7 +589,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
int i, err;
err = pm_runtime_get_if_in_use(iommu->dev);
- if (WARN_ON_ONCE(err <= 0))
+ if (!err || WARN_ON_ONCE(err < 0))
return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
@@ -552,7 +612,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
dev_err(iommu->dev, "Page fault at %pad of type %s\n",
&iova,
- (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+ str_write_read(flags == IOMMU_FAULT_WRITE));
log_iova(iommu, i, iova);
@@ -561,7 +621,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
* Ignore the return code, though, since we always zap cache
* and clear the page fault anyway.
*/
- if (iommu->domain)
+ if (iommu->domain != &rk_identity_domain)
report_iommu_fault(iommu->domain, iommu->dev, iova,
flags);
else
@@ -603,13 +663,13 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
if (!rk_dte_is_pt_valid(dte))
goto out;
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
page_table = (u32 *)phys_to_virt(pt_phys);
pte = page_table[rk_iova_pte_index(iova)];
if (!rk_pte_is_page_valid(pte))
goto out;
- phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+ phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
@@ -670,25 +730,25 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!page_table)
return ERR_PTR(-ENOMEM);
- pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, pt_dma)) {
- dev_err(dma_dev, "DMA mapping error while allocating page table\n");
- free_page((unsigned long)page_table);
+ pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
+ dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
+ iommu_free_pages(page_table);
return ERR_PTR(-ENOMEM);
}
- dte = rk_mk_dte(pt_dma);
+ dte = rk_ops->mk_dtentries(pt_dma);
*dte_addr = dte;
- rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
rk_table_flush(rk_domain,
rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
return (u32 *)phys_to_virt(pt_phys);
}
@@ -730,7 +790,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
if (rk_pte_is_page_valid(pte))
goto unwind;
- pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+ pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
paddr += SPAGE_SIZE;
}
@@ -752,7 +812,7 @@ unwind:
pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
- page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ page_phys = rk_ops->pt_address(pte_addr[pte_count]);
pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
&iova, &page_phys, &paddr, prot);
@@ -760,7 +820,8 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, size_t count,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -787,17 +848,20 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
pte_index = rk_iova_pte_index(iova);
pte_addr = &page_table[pte_index];
- pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
+
+ pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
paddr, size, prot);
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ if (!ret)
+ *mapped = size;
return ret;
}
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
- size_t size)
+ size_t size, size_t count, struct iommu_iotlb_gather *gather)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -823,7 +887,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
return 0;
}
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
@@ -838,7 +902,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- struct rk_iommudata *data = dev->archdata.iommu;
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
return data ? data->iommu : NULL;
}
@@ -881,7 +945,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
- rk_domain->dt_dma);
+ rk_ops->mk_dtentries(rk_domain->dt_dma));
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -895,26 +959,28 @@ out_disable_clocks:
return ret;
}
-static void rk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev,
+ struct iommu_domain *old)
{
struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ struct rk_iommu_domain *rk_domain;
unsigned long flags;
int ret;
/* Allow 'virtual devices' (eg drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
if (!iommu)
- return;
+ return -ENODEV;
+
+ rk_domain = to_rk_domain(iommu->domain);
dev_dbg(dev, "Detaching from iommu domain\n");
- /* iommu already detached */
- if (iommu->domain != domain)
- return;
+ if (iommu->domain == identity_domain)
+ return 0;
- iommu->domain = NULL;
+ iommu->domain = identity_domain;
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_del_init(&iommu->node);
@@ -926,10 +992,21 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
rk_iommu_disable(iommu);
pm_runtime_put(iommu->dev);
}
+
+ return 0;
}
+static struct iommu_domain_ops rk_identity_ops = {
+ .attach_dev = rk_iommu_identity_attach,
+};
+
+static struct iommu_domain rk_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &rk_identity_ops,
+};
+
static int rk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
@@ -950,8 +1027,9 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
if (iommu->domain == domain)
return 0;
- if (iommu->domain)
- rk_iommu_detach_device(iommu->domain, dev);
+ ret = rk_iommu_identity_attach(&rk_identity_domain, dev, old);
+ if (ret)
+ return ret;
iommu->domain = domain;
@@ -964,54 +1042,57 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
return 0;
ret = rk_iommu_enable(iommu);
- if (ret)
- rk_iommu_detach_device(iommu->domain, dev);
+ if (ret) {
+ /*
+ * Note rk_iommu_identity_attach() might fail before physically
+ * attaching the dev to iommu->domain, in which case the actual
+ * old domain for this revert should be rk_identity_domain v.s.
+ * iommu->domain. Since rk_iommu_identity_attach() does not care
+ * about the old domain argument for now, this is not a problem.
+ */
+ WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev,
+ iommu->domain));
+ }
pm_runtime_put(iommu->dev);
return ret;
}
-static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
{
struct rk_iommu_domain *rk_domain;
+ struct rk_iommu *iommu;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
- return NULL;
-
- if (!dma_dev)
- return NULL;
-
- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&rk_domain->domain))
- return NULL;
-
/*
* rk32xx iommus use a 2 level pagetable.
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!rk_domain->dt)
- goto err_put_cookie;
+ goto err_free_domain;
- rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
+ iommu = rk_iommu_from_dev(dev);
+ rk_domain->dma_dev = iommu->dev;
+ rk_domain->dt_dma = dma_map_single(rk_domain->dma_dev, rk_domain->dt,
SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
- dev_err(dma_dev, "DMA map error for DT\n");
+ if (dma_mapping_error(rk_domain->dma_dev, rk_domain->dt_dma)) {
+ dev_err(rk_domain->dma_dev, "DMA map error for DT\n");
goto err_free_dt;
}
- rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
-
spin_lock_init(&rk_domain->iommus_lock);
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
+ rk_domain->domain.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP;
+
rk_domain->domain.geometry.aperture_start = 0;
rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
rk_domain->domain.geometry.force_aperture = true;
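/*
 * Illustrative only, not part of this patch: how the two-level table
 * described above carves up a 32-bit IOVA (1024 DTEs x 1024 PTEs x 4 KiB
 * pages = 4 GiB of IOVA space).  The masks and shifts below reproduce what
 * the driver's rk_iova_dte_index()/rk_iova_pte_index()/rk_iova_page_offset()
 * helpers compute.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iova = 0x12345678;
	uint32_t dte_index = (iova & 0xffc00000) >> 22;	/* level 1 (dt) index */
	uint32_t pte_index = (iova & 0x003ff000) >> 12;	/* level 2 (pt) index */
	uint32_t page_off  =  iova & 0x00000fff;	/* offset within page */

	/* Prints: dte 0x48 pte 0x345 off 0x678 */
	printf("dte %#x pte %#x off %#x\n",
	       (unsigned int)dte_index, (unsigned int)pte_index,
	       (unsigned int)page_off);
	return 0;
}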
@@ -1019,10 +1100,9 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
return &rk_domain->domain;
err_free_dt:
- free_page((unsigned long)rk_domain->dt);
-err_put_cookie:
- if (type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&rk_domain->domain);
+ iommu_free_pages(rk_domain->dt);
+err_free_domain:
+ kfree(rk_domain);
return NULL;
}
@@ -1037,80 +1117,59 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
for (i = 0; i < NUM_DT_ENTRIES; i++) {
u32 dte = rk_domain->dt[i];
if (rk_dte_is_pt_valid(dte)) {
- phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ phys_addr_t pt_phys = rk_ops->pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
- dma_unmap_single(dma_dev, pt_phys,
+ dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- free_page((unsigned long)page_table);
+ iommu_free_pages(page_table);
}
}
- dma_unmap_single(dma_dev, rk_domain->dt_dma,
+ dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- free_page((unsigned long)rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
- if (domain->type == IOMMU_DOMAIN_DMA)
- iommu_put_dma_cookie(&rk_domain->domain);
+ kfree(rk_domain);
}
-static int rk_iommu_add_device(struct device *dev)
+static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group;
- struct rk_iommu *iommu;
struct rk_iommudata *data;
+ struct rk_iommu *iommu;
- data = dev->archdata.iommu;
+ data = dev_iommu_priv_get(dev);
if (!data)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
iommu = rk_iommu_from_dev(dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
- iommu_group_put(group);
-
- iommu_device_link(&iommu->iommu, dev);
- data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
+ data->link = device_link_add(dev, iommu->dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
- return 0;
+ return &iommu->iommu;
}
-static void rk_iommu_remove_device(struct device *dev)
+static void rk_iommu_release_device(struct device *dev)
{
- struct rk_iommu *iommu;
- struct rk_iommudata *data = dev->archdata.iommu;
-
- iommu = rk_iommu_from_dev(dev);
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
device_link_del(data->link);
- iommu_device_unlink(&iommu->iommu, dev);
- iommu_group_remove_device(dev);
-}
-
-static struct iommu_group *rk_iommu_device_group(struct device *dev)
-{
- struct rk_iommu *iommu;
-
- iommu = rk_iommu_from_dev(dev);
-
- return iommu_group_ref_get(iommu->group);
}
static int rk_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_dev;
struct rk_iommudata *data;
- data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
+ iommu_dev = of_find_device_by_node(args->np);
+
+ data = devm_kzalloc(&iommu_dev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- iommu_dev = of_find_device_by_node(args->np);
-
data->iommu = platform_get_drvdata(iommu_dev);
- dev->archdata.iommu = data;
+ dev_iommu_priv_set(dev, data);
platform_device_put(iommu_dev);
@@ -1118,18 +1177,19 @@ static int rk_iommu_of_xlate(struct device *dev,
}
static const struct iommu_ops rk_iommu_ops = {
- .domain_alloc = rk_iommu_domain_alloc,
- .domain_free = rk_iommu_domain_free,
- .attach_dev = rk_iommu_attach_device,
- .detach_dev = rk_iommu_detach_device,
- .map = rk_iommu_map,
- .unmap = rk_iommu_unmap,
- .add_device = rk_iommu_add_device,
- .remove_device = rk_iommu_remove_device,
- .iova_to_phys = rk_iommu_iova_to_phys,
- .device_group = rk_iommu_device_group,
- .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+ .identity_domain = &rk_identity_domain,
+ .domain_alloc_paging = rk_iommu_domain_alloc_paging,
+ .probe_device = rk_iommu_probe_device,
+ .release_device = rk_iommu_release_device,
+ .device_group = generic_single_device_group,
.of_xlate = rk_iommu_of_xlate,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = rk_iommu_attach_device,
+ .map_pages = rk_iommu_map,
+ .unmap_pages = rk_iommu_unmap,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .free = rk_iommu_domain_free,
+ }
};
static int rk_iommu_probe(struct platform_device *pdev)
@@ -1137,17 +1197,31 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ const struct rk_iommu_ops *ops;
int num_res = pdev->num_resources;
- int err, i, irq;
+ int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENOMEM;
+ iommu->domain = &rk_identity_domain;
+
platform_set_drvdata(pdev, iommu);
iommu->dev = dev;
iommu->num_mmu = 0;
+ ops = of_device_get_match_data(dev);
+ if (!rk_ops)
+ rk_ops = ops;
+
+ /*
+ * That should not happen unless different versions of the
+ * hardware block are embedded in the same SoC
+ */
+ if (WARN_ON(rk_ops != ops))
+ return -EINVAL;
+
iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
GFP_KERNEL);
if (!iommu->bases)
@@ -1165,6 +1239,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
+ iommu->num_irq = platform_irq_count(pdev);
+ if (iommu->num_irq < 0)
+ return iommu->num_irq;
+
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
@@ -1192,54 +1270,37 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
return err;
- iommu->group = iommu_group_alloc();
- if (IS_ERR(iommu->group)) {
- err = PTR_ERR(iommu->group);
- goto err_unprepare_clocks;
+ pm_runtime_enable(dev);
+
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
+
+ if (irq < 0) {
+ err = irq;
+ goto err_pm_disable;
+ }
+
+ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (err)
+ goto err_pm_disable;
}
+ dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+
err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
if (err)
- goto err_put_group;
-
- iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
+ goto err_pm_disable;
- err = iommu_device_register(&iommu->iommu);
+ err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
if (err)
goto err_remove_sysfs;
- /*
- * Use the first registered IOMMU device for domain to use with DMA
- * API, since a domain might not physically correspond to a single
- * IOMMU device..
- */
- if (!dma_dev)
- dma_dev = &pdev->dev;
-
- bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
-
- pm_runtime_enable(dev);
-
- i = 0;
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
- if (irq < 0)
- return irq;
-
- err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
- IRQF_SHARED, dev_name(dev), iommu);
- if (err) {
- pm_runtime_disable(dev);
- goto err_remove_sysfs;
- }
- }
-
return 0;
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
-err_put_group:
- iommu_group_put(iommu->group);
-err_unprepare_clocks:
+err_pm_disable:
+ pm_runtime_disable(dev);
clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
return err;
}
@@ -1247,10 +1308,13 @@ err_unprepare_clocks:
static void rk_iommu_shutdown(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
- int i = 0, irq;
+ int i;
+
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu);
+ }
pm_runtime_force_suspend(&pdev->dev);
}
@@ -1259,7 +1323,7 @@ static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
struct rk_iommu *iommu = dev_get_drvdata(dev);
- if (!iommu->domain)
+ if (iommu->domain == &rk_identity_domain)
return 0;
rk_iommu_disable(iommu);
@@ -1270,7 +1334,7 @@ static int __maybe_unused rk_iommu_resume(struct device *dev)
{
struct rk_iommu *iommu = dev_get_drvdata(dev);
- if (!iommu->domain)
+ if (iommu->domain == &rk_identity_domain)
return 0;
return rk_iommu_enable(iommu);
@@ -1282,8 +1346,29 @@ static const struct dev_pm_ops rk_iommu_pm_ops = {
pm_runtime_force_resume)
};
+static struct rk_iommu_ops iommu_data_ops_v1 = {
+ .pt_address = &rk_dte_pt_address,
+ .mk_dtentries = &rk_mk_dte,
+ .mk_ptentries = &rk_mk_pte,
+ .dma_bit_mask = DMA_BIT_MASK(32),
+ .gfp_flags = GFP_DMA32,
+};
+
+static struct rk_iommu_ops iommu_data_ops_v2 = {
+ .pt_address = &rk_dte_pt_address_v2,
+ .mk_dtentries = &rk_mk_dte_v2,
+ .mk_ptentries = &rk_mk_pte_v2,
+ .dma_bit_mask = DMA_BIT_MASK(40),
+ .gfp_flags = 0,
+};
+
static const struct of_device_id rk_iommu_dt_ids[] = {
- { .compatible = "rockchip,iommu" },
+ { .compatible = "rockchip,iommu",
+ .data = &iommu_data_ops_v1,
+ },
+ { .compatible = "rockchip,rk3568-iommu",
+ .data = &iommu_data_ops_v2,
+ },
{ /* sentinel */ }
};
@@ -1297,9 +1382,4 @@ static struct platform_driver rk_iommu_driver = {
.suppress_bind_attrs = true,
},
};
-
-static int __init rk_iommu_init(void)
-{
- return platform_driver_register(&rk_iommu_driver);
-}
-subsys_initcall(rk_iommu_init);
+builtin_platform_driver(rk_iommu_driver);