Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')

 drivers/iommu/rockchip-iommu.c | 174 ++++++++++++++++++++++++++++++------
 1 file changed, 149 insertions(+), 25 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index bb50e015b1d5..94b9d8e5b9a4 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -95,6 +95,15 @@ static const char * const rk_iommu_clocks[] = {
"aclk", "iface",
};
+struct rk_iommu_ops {
+ phys_addr_t (*pt_address)(u32 dte);
+ u32 (*mk_dtentries)(dma_addr_t pt_dma);
+ u32 (*mk_ptentries)(phys_addr_t page, int prot);
+ phys_addr_t (*dte_addr_phys)(u32 addr);
+ u32 (*dma_addr_dte)(dma_addr_t dt_dma);
+ u64 dma_bit_mask;
+};
+
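The central refactoring in this patch is the per-revision ops table above, selected once at probe time from OF match data and called indirectly everywhere else. A minimal standalone sketch of that pattern (hypothetical demo_* names, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in; the driver picks its table via
 * of_device_get_match_data() rather than a hardcoded pointer. */
struct demo_ops {
	uint64_t (*pt_address)(uint32_t dte);
};

static uint64_t demo_pt_address_v1(uint32_t dte)
{
	return dte & 0xfffff000u;	/* v1 keeps only bits 31:12 */
}

static const struct demo_ops demo_v1 = { .pt_address = demo_pt_address_v1 };

int main(void)
{
	const struct demo_ops *ops = &demo_v1;	/* chosen once, "at probe" */

	printf("%#llx\n", (unsigned long long)ops->pt_address(0xcafebabeu));
	return 0;
}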
struct rk_iommu {
struct device *dev;
void __iomem **bases;
@@ -115,6 +124,7 @@ struct rk_iommudata {
};
static struct device *dma_dev;
+static const struct rk_iommu_ops *rk_ops;
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
unsigned int count)
@@ -178,6 +188,33 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}
+/*
+ * In v2:
+ * 31:12 - PT address bit 31:0
+ * 11: 8 - PT address bit 35:32
+ * 7: 4 - PT address bit 39:36
+ * 3: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
+#define DTE_HI_MASK1 GENMASK(11, 8)
+#define DTE_HI_MASK2 GENMASK(7, 4)
+#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
+#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
+#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36)
+#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32)
+
+static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
+{
+ u64 dte_v2 = dte;
+
+ dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
+ ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
+ (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
+
+ return (phys_addr_t)dte_v2;
+}
+
static inline bool rk_dte_is_pt_valid(u32 dte)
{
return dte & RK_DTE_PT_VALID;
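A standalone sketch of the v2 unpacking above, with the GENMASK values written out as plain constants; the demo_ name and the sample value are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as rk_dte_pt_address_v2(), constants expanded. */
static uint64_t demo_dte_pt_address_v2(uint32_t dte)
{
	uint64_t d = dte;

	return ((d & 0x000000f0u) << 32) |	/* dte[7:4]   -> pa[39:36] */
	       ((d & 0x00000f00u) << 24) |	/* dte[11:8]  -> pa[35:32] */
	       (d & 0xfffff000u);		/* dte[31:12] -> pa[31:12] */
}

int main(void)
{
	/* Encodes pa 0x12_3456_7000: 0x1 -> dte[7:4], 0x2 -> dte[11:8]. */
	uint32_t dte = 0x34567000u | (0x2u << 8) | (0x1u << 4);

	printf("%#llx\n", (unsigned long long)demo_dte_pt_address_v2(dte));
	/* prints 0x1234567000 */
	return 0;
}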
@@ -188,6 +225,15 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
+static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
+{
+	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
+		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT2) |
+		 ((pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT1);
+
+	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
+}
+
/*
* Each PTE has a Page address, some flags and a valid bit:
* +---------------------+---+-------+-+
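A standalone round-trip check for the packing helper rk_mk_dte_v2() (a sketch; the demo_ names and sample address are illustrative): encoding a 4 KiB-aligned 40-bit address and decoding it again must return the original address, with the valid bit dropped by the address masks.

#include <assert.h>
#include <stdint.h>

/* Mirrors rk_mk_dte_v2(), valid bit included. */
static uint32_t demo_mk_dte_v2(uint64_t pa)
{
	return (uint32_t)((pa & 0xfffff000u) |
			  ((pa & 0xf000000000ull) >> 32) |	/* pa[39:36] -> dte[7:4]  */
			  ((pa & 0x0f00000000ull) >> 24)) |	/* pa[35:32] -> dte[11:8] */
	       0x1u;						/* valid bit */
}

/* Mirrors rk_dte_pt_address_v2(). */
static uint64_t demo_dte_pt_address_v2(uint32_t dte)
{
	uint64_t d = dte;

	return ((d & 0xf0u) << 32) | ((d & 0xf00u) << 24) | (d & 0xfffff000u);
}

int main(void)
{
	uint64_t pa = 0x1234567000ull;	/* sample 40-bit, 4 KiB aligned */

	assert(demo_dte_pt_address_v2(demo_mk_dte_v2(pa)) == pa);
	return 0;
}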
@@ -214,11 +260,6 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
#define RK_PTE_PAGE_READABLE BIT(1)
#define RK_PTE_PAGE_VALID BIT(0)
-static inline phys_addr_t rk_pte_page_address(u32 pte)
-{
- return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
-}
-
static inline bool rk_pte_is_page_valid(u32 pte)
{
return pte & RK_PTE_PAGE_VALID;
@@ -234,6 +275,29 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
return page | flags | RK_PTE_PAGE_VALID;
}
+/*
+ * In v2:
+ * 31:12 - Page address bit 31:0
+ * 11:9 - Page address bit 34:32
+ * 8:4 - Page address bit 39:35
+ * 3 - Security
+ * 2 - Readable
+ * 1 - Writable
+ * 0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_READABLE_V2 BIT(2)
+#define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
+
+static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
+
+ return rk_mk_dte_v2(page) | flags;
+}
+
static u32 rk_mk_pte_invalid(u32 pte)
{
return pte & ~RK_PTE_PAGE_VALID;
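Note that the permission bits trade places between the two layouts: v1 uses bit 1 for readable and bit 2 for writable, while v2 uses bit 2 for readable and bit 1 for writable. A standalone sketch of the v2 flag mapping (demo_ names illustrative, DEMO_IOMMU_* standing in for the IOMMU_READ/IOMMU_WRITE prot flags):

#include <stdint.h>
#include <stdio.h>

#define DEMO_IOMMU_READ		(1 << 0)
#define DEMO_IOMMU_WRITE	(1 << 1)

/* v2 flag layout from the comment above: R = bit 2, W = bit 1, V = bit 0. */
static uint32_t demo_pte_flags_v2(int prot)
{
	uint32_t flags = 0;

	if (prot & DEMO_IOMMU_READ)
		flags |= 1u << 2;
	if (prot & DEMO_IOMMU_WRITE)
		flags |= 1u << 1;

	return flags | 1u;		/* valid bit */
}

int main(void)
{
	/* R+W -> 0x7 in both layouts, but a read-only page differs:
	 * v2 gives 0x5 (R = bit 2) where v1 gives 0x3 (R = bit 1). */
	printf("%#x\n", demo_pte_flags_v2(DEMO_IOMMU_READ | DEMO_IOMMU_WRITE));
	return 0;
}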
@@ -447,10 +511,10 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
* and verifying that upper 5 nybbles are read back.
*/
for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+ dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
- dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
- if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
return -EFAULT;
}
@@ -469,6 +533,31 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
return 0;
}
+static inline phys_addr_t rk_dte_addr_phys(u32 addr)
+{
+ return (phys_addr_t)addr;
+}
+
+static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
+{
+ return dt_dma;
+}
+
+#define DT_HI_MASK GENMASK_ULL(39, 32)
+#define DTE_BASE_HI_MASK GENMASK(11, 4)
+#define DT_SHIFT 28
+
+static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
+{
+	u64 addr64 = addr;
+
+	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
+}
+
+static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
+{
+ return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
+ ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
+}
+
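A standalone round-trip sketch for the directory-table base (demo_ names and sample address illustrative): the v2 register keeps address bits 31:12 in place and packs bits 39:32 into register bits 11:4, so encode followed by decode must be the identity for a 4 KiB-aligned 40-bit address.

#include <assert.h>
#include <stdint.h>

/* Mirrors rk_dma_addr_dte_v2(): pa[39:32] -> reg[11:4]. */
static uint32_t demo_dma_addr_dte_v2(uint64_t dt_dma)
{
	return (uint32_t)((dt_dma & 0xfffff000u) |
			  ((dt_dma & 0xff00000000ull) >> 28));
}

/* Mirrors rk_dte_addr_phys_v2(): reg[11:4] -> pa[39:32]. */
static uint64_t demo_dte_addr_phys_v2(uint32_t addr)
{
	uint64_t a = addr;

	return (a & 0xfffff000u) | ((a & 0xff0u) << 28);
}

int main(void)
{
	uint64_t dt_dma = 0xab12345000ull;	/* sample 40-bit DT address */

	assert(demo_dte_addr_phys_v2(demo_dma_addr_dte_v2(dt_dma)) == dt_dma);
	return 0;
}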
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
void __iomem *base = iommu->bases[index];
@@ -488,7 +577,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
page_offset = rk_iova_page_offset(iova);
mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
- mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+ mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
@@ -497,14 +586,14 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
if (!rk_dte_is_pt_valid(dte))
goto print_it;
- pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
pte_addr = phys_to_virt(pte_addr_phys);
pte = *pte_addr;
if (!rk_pte_is_page_valid(pte))
goto print_it;
- page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_addr_phys = rk_ops->pt_address(pte) + page_offset;
page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
print_it:
@@ -600,13 +689,13 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
if (!rk_dte_is_pt_valid(dte))
goto out;
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
page_table = (u32 *)phys_to_virt(pt_phys);
pte = page_table[rk_iova_pte_index(iova)];
if (!rk_pte_is_page_valid(pte))
goto out;
- phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+ phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
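For reference, the walk above splits a 32-bit IOVA into three fields before the two table loads; this standalone sketch (demo values; field widths as in the driver's rk_iova_dte_index/rk_iova_pte_index/rk_iova_page_offset helpers: 10-bit DTE index, 10-bit PTE index, 12-bit page offset) decomposes a sample IOVA:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iova = 0x12345678u;				/* sample IOVA */
	uint32_t dte_index = (iova & 0xffc00000u) >> 22;	/* iova[31:22] */
	uint32_t pte_index = (iova & 0x003ff000u) >> 12;	/* iova[21:12] */
	uint32_t offset = iova & 0x00000fffu;			/* iova[11:0]  */

	/* phys = pt_address(pte) + offset after the DTE and PTE loads */
	printf("dte=%u pte=%u off=%#x\n", dte_index, pte_index, offset);
	return 0;
}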
@@ -678,14 +767,13 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
return ERR_PTR(-ENOMEM);
}
- dte = rk_mk_dte(pt_dma);
+ dte = rk_ops->mk_dtentries(pt_dma);
*dte_addr = dte;
- rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
rk_table_flush(rk_domain,
rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
return (u32 *)phys_to_virt(pt_phys);
}
@@ -727,7 +815,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
if (rk_pte_is_page_valid(pte))
goto unwind;
- pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+ pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
paddr += SPAGE_SIZE;
}
@@ -749,7 +837,7 @@ unwind:
pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
- page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ page_phys = rk_ops->pt_address(pte_addr[pte_count]);
pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
&iova, &page_phys, &paddr, prot);
@@ -784,7 +872,8 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
pte_index = rk_iova_pte_index(iova);
pte_addr = &page_table[pte_index];
- pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
+
+ pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
paddr, size, prot);
@@ -820,7 +909,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
return 0;
}
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
@@ -878,7 +967,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
- rk_domain->dt_dma);
+ rk_ops->dma_addr_dte(rk_domain->dt_dma));
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -1003,8 +1092,6 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
goto err_free_dt;
}
- rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
-
spin_lock_init(&rk_domain->iommus_lock);
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
@@ -1036,7 +1123,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
for (i = 0; i < NUM_DT_ENTRIES; i++) {
u32 dte = rk_domain->dt[i];
if (rk_dte_is_pt_valid(dte)) {
- phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ phys_addr_t pt_phys = rk_ops->pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
@@ -1126,6 +1213,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ const struct rk_iommu_ops *ops;
int num_res = pdev->num_resources;
int err, i;
@@ -1137,6 +1225,17 @@ static int rk_iommu_probe(struct platform_device *pdev)
iommu->dev = dev;
iommu->num_mmu = 0;
+ ops = of_device_get_match_data(dev);
+ if (!rk_ops)
+ rk_ops = ops;
+
+	/*
+	 * That should not happen unless different versions of the
+	 * hardware block are embedded in the same SoC.
+	 */
+ if (WARN_ON(rk_ops != ops))
+ return -EINVAL;
+
iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
GFP_KERNEL);
if (!iommu->bases)
@@ -1225,6 +1324,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
}
}
+ dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+
return 0;
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
@@ -1276,8 +1377,31 @@ static const struct dev_pm_ops rk_iommu_pm_ops = {
pm_runtime_force_resume)
};
+static struct rk_iommu_ops iommu_data_ops_v1 = {
+ .pt_address = &rk_dte_pt_address,
+ .mk_dtentries = &rk_mk_dte,
+ .mk_ptentries = &rk_mk_pte,
+ .dte_addr_phys = &rk_dte_addr_phys,
+ .dma_addr_dte = &rk_dma_addr_dte,
+ .dma_bit_mask = DMA_BIT_MASK(32),
+};
+
+static struct rk_iommu_ops iommu_data_ops_v2 = {
+ .pt_address = &rk_dte_pt_address_v2,
+ .mk_dtentries = &rk_mk_dte_v2,
+ .mk_ptentries = &rk_mk_pte_v2,
+ .dte_addr_phys = &rk_dte_addr_phys_v2,
+ .dma_addr_dte = &rk_dma_addr_dte_v2,
+ .dma_bit_mask = DMA_BIT_MASK(40),
+};
+
static const struct of_device_id rk_iommu_dt_ids[] = {
- { .compatible = "rockchip,iommu" },
+ { .compatible = "rockchip,iommu",
+ .data = &iommu_data_ops_v1,
+ },
+ { .compatible = "rockchip,rk3568-iommu",
+ .data = &iommu_data_ops_v2,
+ },
{ /* sentinel */ }
};