Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')
-rw-r--r--   drivers/iommu/rockchip-iommu.c   854
1 file changed, 500 insertions(+), 354 deletions(-)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4ba48a26b389..85f3667e797c 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1,27 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * IOMMU API for Rockchip + * + * Module Authors: Simon Xue <xxm@rock-chips.com> + * Daniel Kurtz <djkurtz@chromium.org> */ +#include <linux/clk.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-iommu.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iommu.h> -#include <linux/jiffies.h> +#include <linux/iopoll.h> #include <linux/list.h> #include <linux/mm.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/string_choices.h> + +#include "iommu-pages.h" /** MMU register offsets */ #define RK_MMU_DTE_ADDR 0x00 /* Directory table address */ @@ -36,7 +42,10 @@ #define RK_MMU_AUTO_GATING 0x24 #define DTE_ADDR_DUMMY 0xCAFEBABE -#define FORCE_RESET_TIMEOUT 100 /* ms */ + +#define RK_MMU_POLL_PERIOD_US 100 +#define RK_MMU_FORCE_RESET_TIMEOUT_US 100000 +#define RK_MMU_POLL_TIMEOUT_US 1000 /* RK_MMU_STATUS fields */ #define RK_MMU_STATUS_PAGING_ENABLED BIT(0) @@ -73,35 +82,57 @@ */ #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 -#define IOMMU_REG_POLL_COUNT_FAST 1000 - struct rk_iommu_domain { struct list_head iommus; - struct platform_device *pdev; u32 *dt; /* page directory table */ dma_addr_t dt_dma; spinlock_t iommus_lock; /* lock for iommus list */ spinlock_t dt_lock; /* lock for modifying page directory table */ + struct device *dma_dev; struct iommu_domain domain; }; +/* list of clocks required by IOMMU */ +static const char * const rk_iommu_clocks[] = { + "aclk", "iface", +}; + +struct rk_iommu_ops { + phys_addr_t (*pt_address)(u32 dte); + u32 (*mk_dtentries)(dma_addr_t pt_dma); + u32 (*mk_ptentries)(phys_addr_t page, int prot); + u64 dma_bit_mask; + gfp_t gfp_flags; +}; + struct rk_iommu { struct device *dev; void __iomem **bases; int num_mmu; - int irq; + int num_irq; + struct clk_bulk_data *clocks; + int num_clocks; + bool reset_disabled; struct iommu_device iommu; struct list_head node; /* entry in rk_iommu_domain.iommus */ struct iommu_domain *domain; /* domain to which iommu is attached */ }; +struct rk_iommudata { + struct device_link *link; /* runtime PM link from IOMMU to master */ + struct rk_iommu *iommu; +}; + +static const struct rk_iommu_ops *rk_ops; +static struct iommu_domain rk_identity_domain; + static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma, unsigned int count) { size_t size = count * sizeof(u32); /* count of u32 entry */ - dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE); + dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE); } static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom) @@ -109,27 +140,6 @@ static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom) return container_of(dom, struct rk_iommu_domain, domain); } -/** - * Inspired by _wait_for in intel_drv.h - * This is NOT safe for use in interrupt context. 
- * - * Note that it's important that we check the condition again after having - * timed out, since the timeout could be due to preemption or similar and - * we've never had a chance to check the condition before the timeout. - */ -#define rk_wait_for(COND, MS) ({ \ - unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ - int ret__ = 0; \ - while (!(COND)) { \ - if (time_after(jiffies, timeout__)) { \ - ret__ = (COND) ? 0 : -ETIMEDOUT; \ - break; \ - } \ - usleep_range(50, 100); \ - } \ - ret__; \ -}) - /* * The Rockchip rk3288 iommu uses a 2-level page table. * The first level is the "Directory Table" (DT). @@ -179,6 +189,33 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte) return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; } +/* + * In v2: + * 31:12 - PT address bit 31:0 + * 11: 8 - PT address bit 35:32 + * 7: 4 - PT address bit 39:36 + * 3: 1 - Reserved + * 0 - 1 if PT @ PT address is valid + */ +#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4) +#define DTE_HI_MASK1 GENMASK(11, 8) +#define DTE_HI_MASK2 GENMASK(7, 4) +#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */ +#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */ +#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36) + +static inline phys_addr_t rk_dte_pt_address_v2(u32 dte) +{ + u64 dte_v2 = dte; + + dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) | + ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) | + (dte_v2 & RK_DTE_PT_ADDRESS_MASK); + + return (phys_addr_t)dte_v2; +} + static inline bool rk_dte_is_pt_valid(u32 dte) { return dte & RK_DTE_PT_VALID; @@ -189,6 +226,15 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma) return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; } +static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma) +{ + pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) | + ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) | + (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2; + + return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID; +} + /* * Each PTE has a Page address, some flags and a valid bit: * +---------------------+---+-------+-+ @@ -215,11 +261,6 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma) #define RK_PTE_PAGE_READABLE BIT(1) #define RK_PTE_PAGE_VALID BIT(0) -static inline phys_addr_t rk_pte_page_address(u32 pte) -{ - return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; -} - static inline bool rk_pte_is_page_valid(u32 pte) { return pte & RK_PTE_PAGE_VALID; @@ -235,6 +276,27 @@ static u32 rk_mk_pte(phys_addr_t page, int prot) return page | flags | RK_PTE_PAGE_VALID; } +/* + * In v2: + * 31:12 - Page address bit 31:0 + * 11: 8 - Page address bit 35:32 + * 7: 4 - Page address bit 39:36 + * 3 - Security + * 2 - Writable + * 1 - Readable + * 0 - 1 if Page @ Page address is valid + */ + +static u32 rk_mk_pte_v2(phys_addr_t page, int prot) +{ + u32 flags = 0; + + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE : 0; + + return rk_mk_dte_v2(page) | flags; +} + static u32 rk_mk_pte_invalid(u32 pte) { return pte & ~RK_PTE_PAGE_VALID; @@ -294,19 +356,21 @@ static void rk_iommu_base_command(void __iomem *base, u32 command) { writel(command, base + RK_MMU_COMMAND); } -static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, +static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, size_t size) { int i; - - dma_addr_t iova_end = iova + size; + dma_addr_t iova_end = iova_start + size; /* * TODO(djkurtz): Figure out when it is more efficient to shootdown the * entire iotlb rather than iterate over individual iovas. */ - for (i = 0; i < iommu->num_mmu; i++) - for (; iova < iova_end; iova += SPAGE_SIZE) + for (i = 0; i < iommu->num_mmu; i++) { + dma_addr_t iova; + + for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE) rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); + } } static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) @@ -333,9 +397,21 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) return enable; } +static bool rk_iommu_is_reset_done(struct rk_iommu *iommu) +{ + bool done = true; + int i; + + for (i = 0; i < iommu->num_mmu; i++) + done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0; + + return done; +} + static int rk_iommu_enable_stall(struct rk_iommu *iommu) { int ret, i; + bool val; if (rk_iommu_is_stall_active(iommu)) return 0; @@ -346,7 +422,9 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu) rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); - ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); + ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, + val, RK_MMU_POLL_PERIOD_US, + RK_MMU_POLL_TIMEOUT_US); if (ret) for (i = 0; i < iommu->num_mmu; i++) dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", @@ -358,13 +436,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu) static int rk_iommu_disable_stall(struct rk_iommu *iommu) { int ret, i; + bool val; if (!rk_iommu_is_stall_active(iommu)) return 0; rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); - ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); + ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, + !val, RK_MMU_POLL_PERIOD_US, + RK_MMU_POLL_TIMEOUT_US); if (ret) for (i = 0; i < iommu->num_mmu; i++) dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", @@ -376,13 +457,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu) static int rk_iommu_enable_paging(struct rk_iommu *iommu) { int ret, i; + bool val; if (rk_iommu_is_paging_enabled(iommu)) return 0; rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); - ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); + ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, + val, RK_MMU_POLL_PERIOD_US, + RK_MMU_POLL_TIMEOUT_US); if (ret) for (i = 0; i < iommu->num_mmu; i++) dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", @@ -394,13 +478,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu) static int rk_iommu_disable_paging(struct rk_iommu *iommu) { int ret, i; + bool val; if (!rk_iommu_is_paging_enabled(iommu)) return 0; rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); - ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); + ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, + !val, RK_MMU_POLL_PERIOD_US, + RK_MMU_POLL_TIMEOUT_US); if (ret) for (i = 0; i < iommu->num_mmu; i++) dev_err(iommu->dev, "Disable paging request timed 
out, status: %#08x\n", @@ -413,16 +500,20 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu) { int ret, i; u32 dte_addr; + bool val; + + if (iommu->reset_disabled) + return 0; /* * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY - * and verifying that upper 5 nybbles are read back. + * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back. */ for (i = 0; i < iommu->num_mmu; i++) { - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); + dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY); + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); - dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR); - if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) { + if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) { dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); return -EFAULT; } @@ -430,13 +521,12 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu) rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); - for (i = 0; i < iommu->num_mmu; i++) { - ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000, - FORCE_RESET_TIMEOUT); - if (ret) { - dev_err(iommu->dev, "FORCE_RESET command timed out\n"); - return ret; - } + ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, + val, RK_MMU_FORCE_RESET_TIMEOUT_US, + RK_MMU_POLL_TIMEOUT_US); + if (ret) { + dev_err(iommu->dev, "FORCE_RESET command timed out\n"); + return ret; } return 0; @@ -461,7 +551,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) page_offset = rk_iova_page_offset(iova); mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR); - mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; + mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr); dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); dte_addr = phys_to_virt(dte_addr_phys); @@ -470,14 +560,14 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) if (!rk_dte_is_pt_valid(dte)) goto print_it; - pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); + pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4); pte_addr = phys_to_virt(pte_addr_phys); pte = *pte_addr; if (!rk_pte_is_page_valid(pte)) goto print_it; - page_addr_phys = rk_pte_page_address(pte) + page_offset; + page_addr_phys = rk_ops->pt_address(pte) + page_offset; page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; print_it: @@ -496,7 +586,14 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) u32 int_status; dma_addr_t iova; irqreturn_t ret = IRQ_NONE; - int i; + int i, err; + + err = pm_runtime_get_if_in_use(iommu->dev); + if (!err || WARN_ON_ONCE(err < 0)) + return ret; + + if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) + goto out; for (i = 0; i < iommu->num_mmu; i++) { int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS); @@ -515,7 +612,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) dev_err(iommu->dev, "Page fault at %pad of type %s\n", &iova, - (flags == IOMMU_FAULT_WRITE) ? "write" : "read"); + str_write_read(flags == IOMMU_FAULT_WRITE)); log_iova(iommu, i, iova); @@ -524,7 +621,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) * Ignore the return code, though, since we always zap cache * and clear the page fault anyway. 
*/ - if (iommu->domain) + if (iommu->domain != &rk_identity_domain) report_iommu_fault(iommu->domain, iommu->dev, iova, flags); else @@ -544,6 +641,10 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); } + clk_bulk_disable(iommu->num_clocks, iommu->clocks); + +out: + pm_runtime_put(iommu->dev); return ret; } @@ -562,13 +663,13 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, if (!rk_dte_is_pt_valid(dte)) goto out; - pt_phys = rk_dte_pt_address(dte); + pt_phys = rk_ops->pt_address(dte); page_table = (u32 *)phys_to_virt(pt_phys); pte = page_table[rk_iova_pte_index(iova)]; if (!rk_pte_is_page_valid(pte)) goto out; - phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); + phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova); out: spin_unlock_irqrestore(&rk_domain->dt_lock, flags); @@ -585,8 +686,21 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, spin_lock_irqsave(&rk_domain->iommus_lock, flags); list_for_each(pos, &rk_domain->iommus) { struct rk_iommu *iommu; + int ret; + iommu = list_entry(pos, struct rk_iommu, node); - rk_iommu_zap_lines(iommu, iova, size); + + /* Only zap TLBs of IOMMUs that are powered on. */ + ret = pm_runtime_get_if_in_use(iommu->dev); + if (WARN_ON_ONCE(ret < 0)) + continue; + if (ret) { + WARN_ON(clk_bulk_enable(iommu->num_clocks, + iommu->clocks)); + rk_iommu_zap_lines(iommu, iova, size); + clk_bulk_disable(iommu->num_clocks, iommu->clocks); + pm_runtime_put(iommu->dev); + } } spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); } @@ -603,7 +717,6 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, dma_addr_t iova) { - struct device *dev = &rk_domain->pdev->dev; u32 *page_table, *dte_addr; u32 dte_index, dte; phys_addr_t pt_phys; @@ -617,25 +730,25 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, if (rk_dte_is_pt_valid(dte)) goto done; - page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags, + SPAGE_SIZE); if (!page_table) return ERR_PTR(-ENOMEM); - pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(dev, pt_dma)) { - dev_err(dev, "DMA mapping error while allocating page table\n"); - free_page((unsigned long)page_table); + pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) { + dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n"); + iommu_free_pages(page_table); return ERR_PTR(-ENOMEM); } - dte = rk_mk_dte(pt_dma); + dte = rk_ops->mk_dtentries(pt_dma); *dte_addr = dte; - rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES); rk_table_flush(rk_domain, rk_domain->dt_dma + dte_index * sizeof(u32), 1); done: - pt_phys = rk_dte_pt_address(dte); + pt_phys = rk_ops->pt_address(dte); return (u32 *)phys_to_virt(pt_phys); } @@ -677,7 +790,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, if (rk_pte_is_page_valid(pte)) goto unwind; - pte_addr[pte_count] = rk_mk_pte(paddr, prot); + pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot); paddr += SPAGE_SIZE; } @@ -699,7 +812,7 @@ unwind: pte_count * SPAGE_SIZE); iova += pte_count * SPAGE_SIZE; - page_phys = rk_pte_page_address(pte_addr[pte_count]); + page_phys = rk_ops->pt_address(pte_addr[pte_count]); pr_err("iova: %pad already 
mapped to %pa cannot remap to phys: %pa prot: %#x\n", &iova, &page_phys, &paddr, prot); @@ -707,7 +820,8 @@ unwind: } static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, size_t count, + int prot, gfp_t gfp, size_t *mapped) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; @@ -734,17 +848,20 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, dte_index = rk_domain->dt[rk_iova_dte_index(iova)]; pte_index = rk_iova_pte_index(iova); pte_addr = &page_table[pte_index]; - pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32); + + pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32); ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, paddr, size, prot); spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + if (!ret) + *mapped = size; return ret; } static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, - size_t size) + size_t size, size_t count, struct iommu_iotlb_gather *gather) { struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; @@ -770,7 +887,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, return 0; } - pt_phys = rk_dte_pt_address(dte); + pt_phys = rk_ops->pt_address(dte); pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32); unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); @@ -785,156 +902,197 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, static struct rk_iommu *rk_iommu_from_dev(struct device *dev) { - struct iommu_group *group; - struct device *iommu_dev; - struct rk_iommu *rk_iommu; + struct rk_iommudata *data = dev_iommu_priv_get(dev); - group = iommu_group_get(dev); - if (!group) - return NULL; - iommu_dev = iommu_group_get_iommudata(group); - rk_iommu = dev_get_drvdata(iommu_dev); - iommu_group_put(group); + return data ? data->iommu : NULL; +} - return rk_iommu; +/* Must be called with iommu powered on and attached */ +static void rk_iommu_disable(struct rk_iommu *iommu) +{ + int i; + + /* Ignore error while disabling, just keep going */ + WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); + rk_iommu_enable_stall(iommu); + rk_iommu_disable_paging(iommu); + for (i = 0; i < iommu->num_mmu; i++) { + rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); + } + rk_iommu_disable_stall(iommu); + clk_bulk_disable(iommu->num_clocks, iommu->clocks); } -static int rk_iommu_attach_device(struct iommu_domain *domain, - struct device *dev) +/* Must be called with iommu powered on and attached */ +static int rk_iommu_enable(struct rk_iommu *iommu) { - struct rk_iommu *iommu; + struct iommu_domain *domain = iommu->domain; struct rk_iommu_domain *rk_domain = to_rk_domain(domain); - unsigned long flags; int ret, i; - /* - * Allow 'virtual devices' (e.g., drm) to attach to domain. - * Such a device does not belong to an iommu group. 
- */ - iommu = rk_iommu_from_dev(dev); - if (!iommu) - return 0; - - ret = rk_iommu_enable_stall(iommu); + ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); if (ret) return ret; - ret = rk_iommu_force_reset(iommu); + ret = rk_iommu_enable_stall(iommu); if (ret) - return ret; - - iommu->domain = domain; + goto out_disable_clocks; - ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq, - IRQF_SHARED, dev_name(dev), iommu); + ret = rk_iommu_force_reset(iommu); if (ret) - return ret; + goto out_disable_stall; for (i = 0; i < iommu->num_mmu; i++) { rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, - rk_domain->dt_dma); + rk_ops->mk_dtentries(rk_domain->dt_dma)); rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); } ret = rk_iommu_enable_paging(iommu); - if (ret) - return ret; + +out_disable_stall: + rk_iommu_disable_stall(iommu); +out_disable_clocks: + clk_bulk_disable(iommu->num_clocks, iommu->clocks); + return ret; +} + +static int rk_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev, + struct iommu_domain *old) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain; + unsigned long flags; + int ret; + + /* Allow 'virtual devices' (eg drm) to detach from domain */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return -ENODEV; + + rk_domain = to_rk_domain(iommu->domain); + + dev_dbg(dev, "Detaching from iommu domain\n"); + + if (iommu->domain == identity_domain) + return 0; + + iommu->domain = identity_domain; spin_lock_irqsave(&rk_domain->iommus_lock, flags); - list_add_tail(&iommu->node, &rk_domain->iommus); + list_del_init(&iommu->node); spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); - dev_dbg(dev, "Attached to iommu domain\n"); - - rk_iommu_disable_stall(iommu); + ret = pm_runtime_get_if_in_use(iommu->dev); + WARN_ON_ONCE(ret < 0); + if (ret > 0) { + rk_iommu_disable(iommu); + pm_runtime_put(iommu->dev); + } return 0; } -static void rk_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) +static struct iommu_domain_ops rk_identity_ops = { + .attach_dev = rk_iommu_identity_attach, +}; + +static struct iommu_domain rk_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &rk_identity_ops, +}; + +static int rk_iommu_attach_device(struct iommu_domain *domain, + struct device *dev, struct iommu_domain *old) { struct rk_iommu *iommu; struct rk_iommu_domain *rk_domain = to_rk_domain(domain); unsigned long flags; - int i; + int ret; - /* Allow 'virtual devices' (eg drm) to detach from domain */ + /* + * Allow 'virtual devices' (e.g., drm) to attach to domain. + * Such a device does not belong to an iommu group. 
+ */ iommu = rk_iommu_from_dev(dev); if (!iommu) - return; + return 0; + + dev_dbg(dev, "Attaching to iommu domain\n"); + + /* iommu already attached */ + if (iommu->domain == domain) + return 0; + + ret = rk_iommu_identity_attach(&rk_identity_domain, dev, old); + if (ret) + return ret; + + iommu->domain = domain; spin_lock_irqsave(&rk_domain->iommus_lock, flags); - list_del_init(&iommu->node); + list_add_tail(&iommu->node, &rk_domain->iommus); spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); - /* Ignore error while disabling, just keep going */ - rk_iommu_enable_stall(iommu); - rk_iommu_disable_paging(iommu); - for (i = 0; i < iommu->num_mmu; i++) { - rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); - rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); - } - rk_iommu_disable_stall(iommu); + ret = pm_runtime_get_if_in_use(iommu->dev); + if (!ret || WARN_ON_ONCE(ret < 0)) + return 0; - devm_free_irq(iommu->dev, iommu->irq, iommu); + ret = rk_iommu_enable(iommu); + if (ret) { + /* + * Note rk_iommu_identity_attach() might fail before physically + * attaching the dev to iommu->domain, in which case the actual + * old domain for this revert should be rk_identity_domain v.s. + * iommu->domain. Since rk_iommu_identity_attach() does not care + * about the old domain argument for now, this is not a problem. + */ + WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev, + iommu->domain)); + } - iommu->domain = NULL; + pm_runtime_put(iommu->dev); - dev_dbg(dev, "Detached from iommu domain\n"); + return ret; } -static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) +static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev) { struct rk_iommu_domain *rk_domain; - struct platform_device *pdev; - struct device *iommu_dev; - - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) - return NULL; - - /* Register a pdev per domain, so DMA API can base on this *dev - * even some virtual master doesn't have an iommu slave - */ - pdev = platform_device_register_simple("rk_iommu_domain", - PLATFORM_DEVID_AUTO, NULL, 0); - if (IS_ERR(pdev)) - return NULL; + struct rk_iommu *iommu; - rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL); + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); if (!rk_domain) - goto err_unreg_pdev; - - rk_domain->pdev = pdev; - - if (type == IOMMU_DOMAIN_DMA && - iommu_get_dma_cookie(&rk_domain->domain)) - goto err_unreg_pdev; + return NULL; /* * rk32xx iommus use a 2 level pagetable. * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. * Allocate one 4 KiB page for each table. 
*/ - rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); + rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags, + SPAGE_SIZE); if (!rk_domain->dt) - goto err_put_cookie; + goto err_free_domain; - iommu_dev = &pdev->dev; - rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt, + iommu = rk_iommu_from_dev(dev); + rk_domain->dma_dev = iommu->dev; + rk_domain->dt_dma = dma_map_single(rk_domain->dma_dev, rk_domain->dt, SPAGE_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) { - dev_err(iommu_dev, "DMA map error for DT\n"); + if (dma_mapping_error(rk_domain->dma_dev, rk_domain->dt_dma)) { + dev_err(rk_domain->dma_dev, "DMA map error for DT\n"); goto err_free_dt; } - rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES); - spin_lock_init(&rk_domain->iommus_lock); spin_lock_init(&rk_domain->dt_lock); INIT_LIST_HEAD(&rk_domain->iommus); + rk_domain->domain.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP; + rk_domain->domain.geometry.aperture_start = 0; rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); rk_domain->domain.geometry.force_aperture = true; @@ -942,12 +1100,9 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) return &rk_domain->domain; err_free_dt: - free_page((unsigned long)rk_domain->dt); -err_put_cookie: - if (type == IOMMU_DOMAIN_DMA) - iommu_put_dma_cookie(&rk_domain->domain); -err_unreg_pdev: - platform_device_unregister(pdev); + iommu_free_pages(rk_domain->dt); +err_free_domain: + kfree(rk_domain); return NULL; } @@ -962,166 +1117,79 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) for (i = 0; i < NUM_DT_ENTRIES; i++) { u32 dte = rk_domain->dt[i]; if (rk_dte_is_pt_valid(dte)) { - phys_addr_t pt_phys = rk_dte_pt_address(dte); + phys_addr_t pt_phys = rk_ops->pt_address(dte); u32 *page_table = phys_to_virt(pt_phys); - dma_unmap_single(&rk_domain->pdev->dev, pt_phys, + dma_unmap_single(rk_domain->dma_dev, pt_phys, SPAGE_SIZE, DMA_TO_DEVICE); - free_page((unsigned long)page_table); + iommu_free_pages(page_table); } } - dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma, + dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma, SPAGE_SIZE, DMA_TO_DEVICE); - free_page((unsigned long)rk_domain->dt); - - if (domain->type == IOMMU_DOMAIN_DMA) - iommu_put_dma_cookie(&rk_domain->domain); - - platform_device_unregister(rk_domain->pdev); -} - -static bool rk_iommu_is_dev_iommu_master(struct device *dev) -{ - struct device_node *np = dev->of_node; - int ret; - - /* - * An iommu master has an iommus property containing a list of phandles - * to iommu nodes, each with an #iommu-cells property with value 0. - */ - ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells"); - return (ret > 0); -} - -static int rk_iommu_group_set_iommudata(struct iommu_group *group, - struct device *dev) -{ - struct device_node *np = dev->of_node; - struct platform_device *pd; - int ret; - struct of_phandle_args args; + iommu_free_pages(rk_domain->dt); - /* - * An iommu master has an iommus property containing a list of phandles - * to iommu nodes, each with an #iommu-cells property with value 0. 
- */ - ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, - &args); - if (ret) { - dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", - np->full_name, ret); - return ret; - } - if (args.args_count != 0) { - dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", - args.np->full_name, args.args_count); - return -EINVAL; - } - - pd = of_find_device_by_node(args.np); - of_node_put(args.np); - if (!pd) { - dev_err(dev, "iommu %s not found\n", args.np->full_name); - return -EPROBE_DEFER; - } - - /* TODO(djkurtz): handle multiple slave iommus for a single master */ - iommu_group_set_iommudata(group, &pd->dev, NULL); - - return 0; + kfree(rk_domain); } -static int rk_iommu_add_device(struct device *dev) +static struct iommu_device *rk_iommu_probe_device(struct device *dev) { - struct iommu_group *group; + struct rk_iommudata *data; struct rk_iommu *iommu; - int ret; - if (!rk_iommu_is_dev_iommu_master(dev)) - return -ENODEV; - - group = iommu_group_get(dev); - if (!group) { - group = iommu_group_alloc(); - if (IS_ERR(group)) { - dev_err(dev, "Failed to allocate IOMMU group\n"); - return PTR_ERR(group); - } - } - - ret = iommu_group_add_device(group, dev); - if (ret) - goto err_put_group; - - ret = rk_iommu_group_set_iommudata(group, dev); - if (ret) - goto err_remove_device; + data = dev_iommu_priv_get(dev); + if (!data) + return ERR_PTR(-ENODEV); iommu = rk_iommu_from_dev(dev); - if (iommu) - iommu_device_link(&iommu->iommu, dev); - - iommu_group_put(group); - return 0; + data->link = device_link_add(dev, iommu->dev, + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); -err_remove_device: - iommu_group_remove_device(dev); -err_put_group: - iommu_group_put(group); - return ret; + return &iommu->iommu; } -static void rk_iommu_remove_device(struct device *dev) +static void rk_iommu_release_device(struct device *dev) { - struct rk_iommu *iommu; + struct rk_iommudata *data = dev_iommu_priv_get(dev); - if (!rk_iommu_is_dev_iommu_master(dev)) - return; - - iommu = rk_iommu_from_dev(dev); - if (iommu) - iommu_device_unlink(&iommu->iommu, dev); - - iommu_group_remove_device(dev); + device_link_del(data->link); } -static const struct iommu_ops rk_iommu_ops = { - .domain_alloc = rk_iommu_domain_alloc, - .domain_free = rk_iommu_domain_free, - .attach_dev = rk_iommu_attach_device, - .detach_dev = rk_iommu_detach_device, - .map = rk_iommu_map, - .unmap = rk_iommu_unmap, - .map_sg = default_iommu_map_sg, - .add_device = rk_iommu_add_device, - .remove_device = rk_iommu_remove_device, - .iova_to_phys = rk_iommu_iova_to_phys, - .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, -}; - -static int rk_iommu_domain_probe(struct platform_device *pdev) +static int rk_iommu_of_xlate(struct device *dev, + const struct of_phandle_args *args) { - struct device *dev = &pdev->dev; + struct platform_device *iommu_dev; + struct rk_iommudata *data; + + iommu_dev = of_find_device_by_node(args->np); - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) + data = devm_kzalloc(&iommu_dev->dev, sizeof(*data), GFP_KERNEL); + if (!data) return -ENOMEM; - /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */ - arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false); + data->iommu = platform_get_drvdata(iommu_dev); + dev_iommu_priv_set(dev, data); - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + platform_device_put(iommu_dev); return 0; } -static struct platform_driver rk_iommu_domain_driver = { - 
.probe = rk_iommu_domain_probe, - .driver = { - .name = "rk_iommu_domain", - }, +static const struct iommu_ops rk_iommu_ops = { + .identity_domain = &rk_identity_domain, + .domain_alloc_paging = rk_iommu_domain_alloc_paging, + .probe_device = rk_iommu_probe_device, + .release_device = rk_iommu_release_device, + .device_group = generic_single_device_group, + .of_xlate = rk_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = rk_iommu_attach_device, + .map_pages = rk_iommu_map, + .unmap_pages = rk_iommu_unmap, + .iova_to_phys = rk_iommu_iova_to_phys, + .free = rk_iommu_domain_free, + } }; static int rk_iommu_probe(struct platform_device *pdev) @@ -1129,6 +1197,7 @@ static int rk_iommu_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rk_iommu *iommu; struct resource *res; + const struct rk_iommu_ops *ops; int num_res = pdev->num_resources; int err, i; @@ -1136,11 +1205,24 @@ static int rk_iommu_probe(struct platform_device *pdev) if (!iommu) return -ENOMEM; + iommu->domain = &rk_identity_domain; + platform_set_drvdata(pdev, iommu); iommu->dev = dev; iommu->num_mmu = 0; - iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res, + ops = of_device_get_match_data(dev); + if (!rk_ops) + rk_ops = ops; + + /* + * That should not happen unless different versions of the + * hardware block are embedded the same SoC + */ + if (WARN_ON(rk_ops != ops)) + return -EINVAL; + + iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), GFP_KERNEL); if (!iommu->bases) return -ENOMEM; @@ -1157,83 +1239,147 @@ static int rk_iommu_probe(struct platform_device *pdev) if (iommu->num_mmu == 0) return PTR_ERR(iommu->bases[0]); - iommu->irq = platform_get_irq(pdev, 0); - if (iommu->irq < 0) { - dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); - return -ENXIO; + iommu->num_irq = platform_irq_count(pdev); + if (iommu->num_irq < 0) + return iommu->num_irq; + + iommu->reset_disabled = device_property_read_bool(dev, + "rockchip,disable-mmu-reset"); + + iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks); + iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks, + sizeof(*iommu->clocks), GFP_KERNEL); + if (!iommu->clocks) + return -ENOMEM; + + for (i = 0; i < iommu->num_clocks; ++i) + iommu->clocks[i].id = rk_iommu_clocks[i]; + + /* + * iommu clocks should be present for all new devices and devicetrees + * but there are older devicetrees without clocks out in the wild. + * So clocks as optional for the time being. 
+ */ + err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks); + if (err == -ENOENT) + iommu->num_clocks = 0; + else if (err) + return err; + + err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks); + if (err) + return err; + + pm_runtime_enable(dev); + + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); + + if (irq < 0) { + err = irq; + goto err_pm_disable; + } + + err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, + IRQF_SHARED, dev_name(dev), iommu); + if (err) + goto err_pm_disable; } + dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); + err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); if (err) - return err; + goto err_pm_disable; - iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); - err = iommu_device_register(&iommu->iommu); + err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev); + if (err) + goto err_remove_sysfs; + return 0; +err_remove_sysfs: + iommu_device_sysfs_remove(&iommu->iommu); +err_pm_disable: + pm_runtime_disable(dev); + clk_bulk_unprepare(iommu->num_clocks, iommu->clocks); return err; } -static int rk_iommu_remove(struct platform_device *pdev) +static void rk_iommu_shutdown(struct platform_device *pdev) { struct rk_iommu *iommu = platform_get_drvdata(pdev); + int i; - if (iommu) { - iommu_device_sysfs_remove(&iommu->iommu); - iommu_device_unregister(&iommu->iommu); + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); + + devm_free_irq(iommu->dev, irq, iommu); } + pm_runtime_force_suspend(&pdev->dev); +} + +static int __maybe_unused rk_iommu_suspend(struct device *dev) +{ + struct rk_iommu *iommu = dev_get_drvdata(dev); + + if (iommu->domain == &rk_identity_domain) + return 0; + + rk_iommu_disable(iommu); return 0; } +static int __maybe_unused rk_iommu_resume(struct device *dev) +{ + struct rk_iommu *iommu = dev_get_drvdata(dev); + + if (iommu->domain == &rk_identity_domain) + return 0; + + return rk_iommu_enable(iommu); +} + +static const struct dev_pm_ops rk_iommu_pm_ops = { + SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static struct rk_iommu_ops iommu_data_ops_v1 = { + .pt_address = &rk_dte_pt_address, + .mk_dtentries = &rk_mk_dte, + .mk_ptentries = &rk_mk_pte, + .dma_bit_mask = DMA_BIT_MASK(32), + .gfp_flags = GFP_DMA32, +}; + +static struct rk_iommu_ops iommu_data_ops_v2 = { + .pt_address = &rk_dte_pt_address_v2, + .mk_dtentries = &rk_mk_dte_v2, + .mk_ptentries = &rk_mk_pte_v2, + .dma_bit_mask = DMA_BIT_MASK(40), + .gfp_flags = 0, +}; + static const struct of_device_id rk_iommu_dt_ids[] = { - { .compatible = "rockchip,iommu" }, + { .compatible = "rockchip,iommu", + .data = &iommu_data_ops_v1, + }, + { .compatible = "rockchip,rk3568-iommu", + .data = &iommu_data_ops_v2, + }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); static struct platform_driver rk_iommu_driver = { .probe = rk_iommu_probe, - .remove = rk_iommu_remove, + .shutdown = rk_iommu_shutdown, .driver = { .name = "rk_iommu", .of_match_table = rk_iommu_dt_ids, + .pm = &rk_iommu_pm_ops, + .suppress_bind_attrs = true, }, }; - -static int __init rk_iommu_init(void) -{ - struct device_node *np; - int ret; - - np = of_find_matching_node(NULL, rk_iommu_dt_ids); - if (!np) - return 0; - - of_node_put(np); - - ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); - if (ret) - return ret; - - ret = platform_driver_register(&rk_iommu_domain_driver); - if (ret) - return ret; - - ret = 
platform_driver_register(&rk_iommu_driver);
-	if (ret)
-		platform_driver_unregister(&rk_iommu_domain_driver);
-	return ret;
-}
-static void __exit rk_iommu_exit(void)
-{
-	platform_driver_unregister(&rk_iommu_driver);
-	platform_driver_unregister(&rk_iommu_domain_driver);
-}
-
-subsys_initcall(rk_iommu_init);
-module_exit(rk_iommu_exit);
-
-MODULE_DESCRIPTION("IOMMU API for Rockchip");
-MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
-MODULE_ALIAS("platform:rockchip-iommu");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(rk_iommu_driver);
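
The paging-domain code keeps the rk3288-style two-level table: the directory table and each page table hold 1024 4-byte entries in one 4 KiB page, so a 32-bit IOVA splits into a 10-bit directory index, a 10-bit page-table index and a 12-bit page offset (this is the walk rk_iommu_iova_to_phys() performs). Below is a minimal standalone sketch of that split, assuming the 10/10/12 layout described in the driver comments; the helper names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* 32-bit IOVA: [31:22] directory index, [21:12] page-table index, [11:0] offset */
static uint32_t iova_dte_index(uint32_t iova)   { return (iova >> 22) & 0x3ff; }
static uint32_t iova_pte_index(uint32_t iova)   { return (iova >> 12) & 0x3ff; }
static uint32_t iova_page_offset(uint32_t iova) { return iova & 0xfff; }

int main(void)
{
	uint32_t iova = 0x12345678;	/* arbitrary example address */

	printf("dte %u, pte %u, offset 0x%03x\n",
	       iova_dte_index(iova), iova_pte_index(iova),
	       iova_page_offset(iova));
	return 0;
}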
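To illustrate the v2 descriptor layout that the new rk_dte_pt_address_v2()/rk_mk_dte_v2()/rk_mk_pte_v2() helpers implement (address bits 31:12 stay in entry bits 31:12, bits 35:32 move to entry bits 11:8, bits 39:36 to entry bits 7:4, bit 0 is the valid flag), here is a minimal standalone C sketch of the pack/unpack round trip. It is userspace code for illustration only; the helper names and the test address are made up, not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PT_VALID	0x1u

/* Pack a 40-bit, 4 KiB-aligned physical address into a 32-bit v2 entry. */
static uint32_t mk_entry_v2(uint64_t pa)
{
	return (uint32_t)((pa & 0xfffff000ull) |	/* bits 31:12 */
			  (((pa >> 32) & 0xf) << 8) |	/* bits 35:32 -> 11:8 */
			  (((pa >> 36) & 0xf) << 4) |	/* bits 39:36 -> 7:4 */
			  PT_VALID);
}

/* Inverse: recover the 40-bit physical address from a v2 entry. */
static uint64_t entry_to_pa_v2(uint32_t ent)
{
	uint64_t e = ent;

	return (e & 0xfffff000ull) |
	       ((e & 0x00000f00ull) << 24) |		/* bits 11:8 -> 35:32 */
	       ((e & 0x000000f0ull) << 32);		/* bits 7:4  -> 39:36 */
}

int main(void)
{
	uint64_t pa = 0x3456789000ull;	/* arbitrary 40-bit, page-aligned */
	uint32_t ent = mk_entry_v2(pa);

	printf("pa %#llx -> entry %#x -> pa %#llx\n",
	       (unsigned long long)pa, ent,
	       (unsigned long long)entry_to_pa_v2(ent));
	assert(entry_to_pa_v2(ent) == pa);
	return 0;
}

The shifts mirror DTE_HI_SHIFT1 (bit 8 to bit 32, i.e. 24) and DTE_HI_SHIFT2 (bit 4 to bit 36, i.e. 32) in the patch, which is why a 32-bit table entry can cover the 40-bit (DMA_BIT_MASK(40)) physical space of the v2 IOMMU.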
