Diffstat (limited to 'drivers/gpu/drm/msm/msm_iommu.c')
-rw-r--r-- | drivers/gpu/drm/msm/msm_iommu.c | 93
1 file changed, 85 insertions, 8 deletions
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5cc8d358cc97..fd73dcd3f30e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
 	struct msm_mmu base;
 	struct msm_mmu *parent;
 	struct io_pgtable_ops *pgtbl_ops;
+	const struct iommu_flush_ops *tlb;
+	struct device *iommu_dev;
 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	phys_addr_t ttbr;
 	u32 asid;
@@ -193,6 +195,28 @@ struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
 	return &iommu->domain->geometry;
 }
 
+int
+msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
+{
+	struct msm_iommu_pagetable *pagetable;
+	struct arm_lpae_io_pgtable_walk_data wd = {};
+
+	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+		return -EINVAL;
+
+	pagetable = to_pagetable(mmu);
+
+	if (!pagetable->pgtbl_ops->pgtable_walk)
+		return -EINVAL;
+
+	pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);
+
+	for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
+		ptes[i] = wd.ptes[i];
+
+	return 0;
+}
+
 static const struct msm_mmu_funcs pagetable_funcs = {
 	.map = msm_iommu_pagetable_map,
 	.unmap = msm_iommu_pagetable_unmap,
@@ -201,11 +225,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
 
 static void msm_iommu_tlb_flush_all(void *cookie)
 {
+	struct msm_iommu_pagetable *pagetable = cookie;
+	struct adreno_smmu_priv *adreno_smmu;
+
+	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+		return;
+
+	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+	pm_runtime_put_autosuspend(pagetable->iommu_dev);
 }
 
 static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
 				     size_t granule, void *cookie)
 {
+	struct msm_iommu_pagetable *pagetable = cookie;
+	struct adreno_smmu_priv *adreno_smmu;
+
+	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+		return;
+
+	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+	pm_runtime_put_autosuspend(pagetable->iommu_dev);
 }
 
 static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,13 +259,13 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 {
 }
 
-static const struct iommu_flush_ops null_tlb_ops = {
+static const struct iommu_flush_ops tlb_ops = {
 	.tlb_flush_all = msm_iommu_tlb_flush_all,
 	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
 	.tlb_add_page = msm_iommu_tlb_add_page,
 };
 
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
 		unsigned long iova, int flags, void *arg);
 
 struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
@@ -254,10 +300,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
 	/* The incoming cfg will have the TTBR1 quirk enabled */
 	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
-	ttbr0_cfg.tlb = &null_tlb_ops;
+	ttbr0_cfg.tlb = &tlb_ops;
 
 	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
-		&ttbr0_cfg, iommu->domain);
+		&ttbr0_cfg, pagetable);
 
 	if (!pagetable->pgtbl_ops) {
 		kfree(pagetable);
@@ -279,6 +325,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
 	/* Needed later for TLB flush */
 	pagetable->parent = parent;
+	pagetable->tlb = ttbr1_cfg->tlb;
+	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
 	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
 	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
 
@@ -293,7 +341,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 	return &pagetable->base;
 }
 
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
 		unsigned long iova, int flags, void *arg)
 {
 	struct msm_iommu *iommu = arg;
@@ -317,6 +365,17 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
 	return 0;
 }
 
+static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
+		unsigned long iova, int flags, void *arg)
+{
+	struct msm_iommu *iommu = arg;
+
+	if (iommu->base.handler)
+		return iommu->base.handler(iommu->base.arg, iova, flags, NULL);
+
+	return -ENOSYS;
+}
+
 static void msm_iommu_resume_translation(struct msm_mmu *mmu)
 {
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
@@ -381,10 +440,13 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
 	struct msm_iommu *iommu;
 	int ret;
 
-	domain = iommu_domain_alloc(dev->bus);
-	if (!domain)
+	if (!device_iommu_mapped(dev))
 		return NULL;
 
+	domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(domain))
+		return ERR_CAST(domain);
+
 	iommu_set_pgtable_quirks(domain, quirks);
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
@@ -408,6 +470,21 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
 	return &iommu->base;
 }
 
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
+{
+	struct msm_iommu *iommu;
+	struct msm_mmu *mmu;
+
+	mmu = msm_iommu_new(dev, quirks);
+	if (IS_ERR_OR_NULL(mmu))
+		return mmu;
+
+	iommu = to_msm_iommu(mmu);
+	iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);
+
+	return mmu;
+}
+
 struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
 {
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
@@ -419,7 +496,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
 		return mmu;
 
 	iommu = to_msm_iommu(mmu);
-	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+	iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
 
 	/* Enable stall on iommu fault: */
 	if (adreno_smmu->set_stall)
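
Usage note (illustrative, not part of the patch): msm_iommu_pagetable_walk()
copies one descriptor per lookup level out of the ARM LPAE io-pgtable walk,
so a fault or crash-dump path can report where a translation broke down.
Below is a minimal caller sketch, assuming a struct msm_mmu * that was
created by msm_iommu_pagetable_create(); the dump_fault_ptes() helper name
and the pr_info() reporting are hypothetical.

/* Hypothetical caller sketch -- not part of this patch. */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/printk.h>
#include "msm_mmu.h"		/* msm_iommu_pagetable_walk() */

static void dump_fault_ptes(struct msm_mmu *mmu, unsigned long iova)
{
	/* One descriptor per level of the (up to) 4-level LPAE walk */
	uint64_t ptes[4];

	/*
	 * Fails with -EINVAL unless mmu is a per-process pagetable
	 * (MSM_MMU_IOMMU_PAGETABLE) whose io-pgtable format implements
	 * ->pgtable_walk().
	 */
	if (msm_iommu_pagetable_walk(mmu, iova, ptes))
		return;

	for (int i = 0; i < ARRAY_SIZE(ptes); i++)
		pr_info("pgtbl[%d]: %016llx\n", i, ptes[i]);
}

Printing the raw 64-bit descriptors (the %016llx width matches the
uint64_t ptes[4] signature) keeps the helper format-agnostic; decoding
address and permission bits is left to whoever reads the dump.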