Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')
-rw-r--r-- | drivers/iommu/rockchip-iommu.c | 80
1 file changed, 38 insertions, 42 deletions
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index da79d9f4cf63..22f74ba33a0e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -25,6 +25,9 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/string_choices.h>
+
+#include "iommu-pages.h"
 
 /** MMU register offsets */
 #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
@@ -85,6 +88,7 @@ struct rk_iommu_domain {
 	dma_addr_t dt_dma;
 	spinlock_t iommus_lock; /* lock for iommus list */
 	spinlock_t dt_lock; /* lock for modifying page directory table */
+	struct device *dma_dev;
 
 	struct iommu_domain domain;
 };
@@ -120,7 +124,6 @@ struct rk_iommudata {
 	struct rk_iommu *iommu;
 };
 
-static struct device *dma_dev;
 static const struct rk_iommu_ops *rk_ops;
 static struct iommu_domain rk_identity_domain;
 
@@ -129,7 +132,7 @@ static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
 {
 	size_t size = count * sizeof(u32); /* count of u32 entry */
 
-	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
+	dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE);
 }
 
 static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -609,7 +612,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 
 			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
 				&iova,
-				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+				str_write_read(flags == IOMMU_FAULT_WRITE));
 
 			log_iova(iommu, i, iova);
 
@@ -727,14 +730,15 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
 	if (rk_dte_is_pt_valid(dte))
 		goto done;
 
-	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
+	page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags,
+					  SPAGE_SIZE);
 	if (!page_table)
 		return ERR_PTR(-ENOMEM);
 
-	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_dev, pt_dma)) {
-		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
-		free_page((unsigned long)page_table);
+	pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
+		dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
+		iommu_free_pages(page_table);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1048,9 +1052,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct rk_iommu_domain *rk_domain;
-
-	if (!dma_dev)
-		return NULL;
+	struct rk_iommu *iommu;
 
 	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
 	if (!rk_domain)
@@ -1061,14 +1063,17 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
 	 * Allocate one 4 KiB page for each table.
 	 */
-	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
+	rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags,
+					     SPAGE_SIZE);
 	if (!rk_domain->dt)
 		goto err_free_domain;
 
-	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
+	iommu = rk_iommu_from_dev(dev);
+	rk_domain->dma_dev = iommu->dev;
+	rk_domain->dt_dma = dma_map_single(rk_domain->dma_dev, rk_domain->dt,
 					   SPAGE_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
-		dev_err(dma_dev, "DMA map error for DT\n");
+	if (dma_mapping_error(rk_domain->dma_dev, rk_domain->dt_dma)) {
+		dev_err(rk_domain->dma_dev, "DMA map error for DT\n");
 		goto err_free_dt;
 	}
 
@@ -1083,7 +1088,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 	return &rk_domain->domain;
 
 err_free_dt:
-	free_page((unsigned long)rk_domain->dt);
+	iommu_free_pages(rk_domain->dt);
err_free_domain:
 	kfree(rk_domain);
 
@@ -1102,15 +1107,15 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
 		if (rk_dte_is_pt_valid(dte)) {
 			phys_addr_t pt_phys = rk_ops->pt_address(dte);
 			u32 *page_table = phys_to_virt(pt_phys);
-			dma_unmap_single(dma_dev, pt_phys,
+			dma_unmap_single(rk_domain->dma_dev, pt_phys,
 					 SPAGE_SIZE, DMA_TO_DEVICE);
-			free_page((unsigned long)page_table);
+			iommu_free_pages(page_table);
 		}
 	}
 
-	dma_unmap_single(dma_dev, rk_domain->dt_dma,
+	dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
 			 SPAGE_SIZE, DMA_TO_DEVICE);
-	free_page((unsigned long)rk_domain->dt);
+	iommu_free_pages(rk_domain->dt);
 
 	kfree(rk_domain);
 }
@@ -1145,12 +1150,12 @@ static int rk_iommu_of_xlate(struct device *dev,
 	struct platform_device *iommu_dev;
 	struct rk_iommudata *data;
 
-	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
+	iommu_dev = of_find_device_by_node(args->np);
+
+	data = devm_kzalloc(&iommu_dev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	iommu_dev = of_find_device_by_node(args->np);
-
 	data->iommu = platform_get_drvdata(iommu_dev);
 	data->iommu->domain = &rk_identity_domain;
 	dev_iommu_priv_set(dev, data);
@@ -1253,22 +1258,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
-	if (err)
-		goto err_unprepare_clocks;
-
-	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
-	if (err)
-		goto err_remove_sysfs;
-
-	/*
-	 * Use the first registered IOMMU device for domain to use with DMA
-	 * API, since a domain might not physically correspond to a single
-	 * IOMMU device..
-	 */
-	if (!dma_dev)
-		dma_dev = &pdev->dev;
-
 	pm_runtime_enable(dev);
 
 	for (i = 0; i < iommu->num_irq; i++) {
@@ -1287,12 +1276,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
 	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
 
+	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+	if (err)
+		goto err_pm_disable;
+
+	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
+	if (err)
+		goto err_remove_sysfs;
+
 	return 0;
-err_pm_disable:
-	pm_runtime_disable(dev);
 err_remove_sysfs:
 	iommu_device_sysfs_remove(&iommu->iommu);
-err_unprepare_clocks:
+err_pm_disable:
+	pm_runtime_disable(dev);
 	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
 	return err;
 }
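
The structural change in this diff is that the device used for page-table DMA maintenance now lives in each rk_iommu_domain (taken from the attached IOMMU instance) instead of a module-global pointer captured by whichever IOMMU probed first. Below is a minimal sketch of that per-domain pattern using only the standard DMA-mapping API; the my_domain type and helper names are hypothetical illustrations, not code from the patch.

/*
 * Sketch: bind a domain's table memory to the IOMMU instance that
 * owns it, so mapping and syncing always target the right device.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct my_domain {
	struct device *dma_dev;	/* IOMMU device that walks the tables */
	u32 *dt;		/* CPU pointer to the directory table */
	dma_addr_t dt_dma;	/* bus address programmed into the IOMMU */
};

static int my_domain_map_table(struct my_domain *dom,
			       struct device *iommu_dev, size_t size)
{
	dom->dma_dev = iommu_dev;	/* per-domain, not driver-global */

	dom->dt = kzalloc(size, GFP_KERNEL);
	if (!dom->dt)
		return -ENOMEM;

	dom->dt_dma = dma_map_single(dom->dma_dev, dom->dt, size,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(dom->dma_dev, dom->dt_dma)) {
		kfree(dom->dt);
		return -ENOMEM;
	}
	return 0;
}

/* After the CPU updates an entry, push the write out to the device. */
static void my_domain_flush(struct my_domain *dom, dma_addr_t dma, size_t size)
{
	dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE);
}

Because a domain no longer depends on a device recorded at first probe, the patch can also drop the "use the first registered IOMMU device" workaround and defer iommu_device_sysfs_add()/iommu_device_register() to the end of rk_iommu_probe(), which is what simplifies the error unwind to err_remove_sysfs/err_pm_disable.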