From eecbb3cdcccac24e1e67e92fec24f9035fe19784 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 1 Jul 2019 17:05:43 -0600 Subject: drm/panfrost: Split panfrost_mmu_map SG list mapping to its own function In preparation to create partial GPU mappings of BOs on page faults, split out the SG list handling of panfrost_mmu_map(). Cc: Tomeu Vizoso Cc: Boris Brezillon Cc: Robin Murphy Reviewed-by: Steven Price Acked-by: Alyssa Rosenzweig Signed-off-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20190808222200.13176-5-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 52 ++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 92ac995dd9c6..b4ac149b2399 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -145,27 +145,13 @@ static size_t get_pgsize(u64 addr, size_t size) return SZ_2M; } -int panfrost_mmu_map(struct panfrost_gem_object *bo) +static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova, + int prot, struct sg_table *sgt) { - struct drm_gem_object *obj = &bo->base.base; - struct panfrost_device *pfdev = to_panfrost_device(obj->dev); - struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops; - u64 iova = bo->node.start << PAGE_SHIFT; unsigned int count; struct scatterlist *sgl; - struct sg_table *sgt; - int ret; - - if (WARN_ON(bo->is_mapped)) - return 0; - - sgt = drm_gem_shmem_get_pages_sgt(obj); - if (WARN_ON(IS_ERR(sgt))) - return PTR_ERR(sgt); - - ret = pm_runtime_get_sync(pfdev->dev); - if (ret < 0) - return ret; + struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops; + u64 start_iova = iova; mutex_lock(&pfdev->mmu->lock); @@ -178,18 +164,42 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) while (len) { size_t pgsize = get_pgsize(iova | paddr, len); - ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ); + ops->map(ops, iova, paddr, pgsize, prot); iova += pgsize; paddr += pgsize; len -= pgsize; } } - mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, - bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT); + mmu_hw_do_operation(pfdev, 0, start_iova, iova - start_iova, + AS_COMMAND_FLUSH_PT); mutex_unlock(&pfdev->mmu->lock); + return 0; +} + +int panfrost_mmu_map(struct panfrost_gem_object *bo) +{ + struct drm_gem_object *obj = &bo->base.base; + struct panfrost_device *pfdev = to_panfrost_device(obj->dev); + struct sg_table *sgt; + int ret; + int prot = IOMMU_READ | IOMMU_WRITE; + + if (WARN_ON(bo->is_mapped)) + return 0; + + sgt = drm_gem_shmem_get_pages_sgt(obj); + if (WARN_ON(IS_ERR(sgt))) + return PTR_ERR(sgt); + + ret = pm_runtime_get_sync(pfdev->dev); + if (ret < 0) + return ret; + + mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt); + pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); bo->is_mapped = true; -- cgit From 203270c025be02d52feca729d9193860317c2e82 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 11 Jul 2019 15:56:14 -0600 Subject: drm/panfrost: Add a no execute flag for BO allocations Executable buffers have an alignment restriction that they can't cross a 16MB boundary as the GPU program counter is 24 bits. This restriction is currently not handled and we just get lucky. As current userspace assumes all BOs are executable, that has to remain the default. 
So add a new PANFROST_BO_NOEXEC flag to allow userspace to indicate which BOs are not executable. There is also a restriction that executable buffers cannot start or end on a 4GB boundary. This is mostly avoided as there is only 4GB of space currently and the beginning is already blocked out for NULL ptr detection. Add support to handle this restriction fully regardless of the current constraints. For existing userspace, all created BOs remain executable, but the GPU VA alignment will be increased to the size of the BO. This shouldn't matter as there is plenty of GPU VA space. Cc: Tomeu Vizoso Cc: Boris Brezillon Cc: Robin Murphy Reviewed-by: Steven Price Acked-by: Alyssa Rosenzweig Signed-off-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20190808222200.13176-6-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index b4ac149b2399..eba6ce785ef0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -190,6 +190,9 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (WARN_ON(bo->is_mapped)) return 0; + if (bo->noexec) + prot |= IOMMU_NOEXEC; + sgt = drm_gem_shmem_get_pages_sgt(obj); if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); -- cgit From 73e467f60acdabd480d1b377a623ba13db0e5dd2 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 8 Aug 2019 14:30:39 -0600 Subject: drm/panfrost: Consolidate reset handling Runtime PM resume and job timeouts both call the same sequence of functions, so consolidate them to a common function. This will make changing the reset related code easier. The MMU also needs some re-initialization on reset, so rework its call. In the process, we hide the address space details within the MMU code in preparation to support multiple address spaces. 
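For reference, the device-side half of this consolidation lives outside this file-limited log; a minimal sketch of what the common reset helper would look like (the GPU/job function names are assumptions based on the rest of the driver, not code from this diff):

    /* Sketch only: panfrost_device.c is not shown in this log. */
    void panfrost_device_reset(struct panfrost_device *pfdev)
    {
            panfrost_gpu_soft_reset(pfdev);

            panfrost_gpu_power_on(pfdev);
            panfrost_mmu_reset(pfdev);      /* MMU half, added below */
            panfrost_job_enable_interrupts(pfdev);
    }
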
Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Cc: Robin Murphy Cc: Alyssa Rosenzweig Reviewed-by: Steven Price Signed-off-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20190808222200.13176-7-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index eba6ce785ef0..13757427b886 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -105,15 +105,12 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr, return ret; } -void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) +static void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) { struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg; u64 transtab = cfg->arm_mali_lpae_cfg.transtab; u64 memattr = cfg->arm_mali_lpae_cfg.memattr; - mmu_write(pfdev, MMU_INT_CLEAR, ~0); - mmu_write(pfdev, MMU_INT_MASK, ~0); - mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL); mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32); @@ -137,6 +134,14 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr) write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); } +void panfrost_mmu_reset(struct panfrost_device *pfdev) +{ + panfrost_mmu_enable(pfdev, 0); + + mmu_write(pfdev, MMU_INT_CLEAR, ~0); + mmu_write(pfdev, MMU_INT_MASK, ~0); +} + static size_t get_pgsize(u64 addr, size_t size) { if (addr & (SZ_2M - 1) || size < SZ_2M) @@ -375,9 +380,6 @@ int panfrost_mmu_init(struct panfrost_device *pfdev) dev_err(pfdev->dev, "failed to request mmu irq"); return err; } - mmu_write(pfdev, MMU_INT_CLEAR, ~0); - mmu_write(pfdev, MMU_INT_MASK, ~0); - pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) { .pgsize_bitmap = SZ_4K | SZ_2M, .ias = FIELD_GET(0xff, pfdev->features.mmu_features), -- cgit From b31bdd1389fc765c07ab3d5b341092cb16807d29 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 26 Jul 2019 16:06:57 -0600 Subject: drm/panfrost: Convert MMU IRQ handler to threaded handler In preparation to handle mapping of page faults, we need the MMU handler to be threaded as code paths take a mutex. As the IRQ may be shared, we can't use the default handler and must disable the MMU interrupts locally. 
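For contrast, a driver with a dedicated IRQ line could let the IRQ core do the masking instead (a sketch, not panfrost code):

    /* With a NULL hard handler, IRQF_ONESHOT keeps the line masked
     * until the threaded handler finishes. IRQF_ONESHOT can't be
     * relied on for shared lines, hence the manual MMU_INT_MASK
     * handling in the patch below. */
    err = devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread,
                                    IRQF_ONESHOT, "foo", priv);

Because that isn't an option here, the hard handler masks the MMU's own interrupt sources before waking the thread, and the threaded handler unmasks them when it is done.
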
Cc: Tomeu Vizoso Cc: Boris Brezillon Cc: Robin Murphy Reviewed-by: Steven Price Acked-by: Alyssa Rosenzweig Signed-off-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20190808222200.13176-8-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 13757427b886..b609ee55a872 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -305,12 +305,20 @@ static const char *access_type_name(struct panfrost_device *pfdev, static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; - u32 status = mmu_read(pfdev, MMU_INT_STAT); - int i; - if (!status) + if (!mmu_read(pfdev, MMU_INT_STAT)) return IRQ_NONE; + mmu_write(pfdev, MMU_INT_MASK, 0); + return IRQ_WAKE_THREAD; +} + +static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) +{ + struct panfrost_device *pfdev = data; + u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT); + int i; + dev_err(pfdev->dev, "mmu irq status=%x\n", status); for (i = 0; status; i++) { @@ -355,6 +363,7 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data) status &= ~mask; } + mmu_write(pfdev, MMU_INT_MASK, ~0); return IRQ_HANDLED; }; @@ -373,8 +382,9 @@ int panfrost_mmu_init(struct panfrost_device *pfdev) if (irq <= 0) return -ENODEV; - err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler, - IRQF_SHARED, "mmu", pfdev); + err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler, + panfrost_mmu_irq_handler_thread, + IRQF_SHARED, "mmu", pfdev); if (err) { dev_err(pfdev->dev, "failed to request mmu irq"); -- cgit From 187d2929206e6b098312c174ea873e4cedf5420d Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 26 Jul 2019 16:09:43 -0600 Subject: drm/panfrost: Add support for GPU heap allocations The midgard/bifrost GPUs need to allocate GPU heap memory which is allocated on GPU page faults and not pinned in memory. The vendor driver calls this functionality GROW_ON_GPF. This implementation assumes that BOs allocated with the PANFROST_BO_NOEXEC flag are never mmapped or exported. Both of those may actually work, but I'm unsure if there's some interaction there. It would cause the whole object to be pinned in memory which would defeat the point of this. On faults, we map in 2MB at a time in order to utilize huge pages (if enabled). Currently, once we've mapped pages in, they are only unmapped if the BO is freed. Once we add shrinker support, we can unmap pages with the shrinker. 
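From userspace, a heap BO would be created roughly like this (a sketch using the uapi flags added by this series; error handling omitted):

    struct drm_panfrost_create_bo arg = {
            .size = 64 * 1024 * 1024,       /* virtual size only */
            .flags = PANFROST_BO_HEAP | PANFROST_BO_NOEXEC,
    };

    drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &arg);
    /* arg.handle holds the GEM handle, arg.offset the GPU VA */

No pages are pinned up front; panfrost_mmu_map_fault_addr() below populates the region in 2MB chunks as the GPU faults on it.
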
Cc: Tomeu Vizoso Cc: Boris Brezillon Cc: Robin Murphy Acked-by: Alyssa Rosenzweig Reviewed-by: Steven Price Signed-off-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20190808222200.13176-9-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 129 +++++++++++++++++++++++++++++--- 1 file changed, 120 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index b609ee55a872..2ed411f09d80 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -2,6 +2,7 @@ /* Copyright 2019 Linaro, Ltd, Rob Herring */ #include #include +#include #include #include #include @@ -9,6 +10,7 @@ #include #include #include +#include #include #include "panfrost_device.h" @@ -240,12 +242,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) size_t unmapped_page; size_t pgsize = get_pgsize(iova, len - unmapped_len); - unmapped_page = ops->unmap(ops, iova, pgsize); - if (!unmapped_page) - break; - - iova += unmapped_page; - unmapped_len += unmapped_page; + if (ops->iova_to_phys(ops, iova)) { + unmapped_page = ops->unmap(ops, iova, pgsize); + WARN_ON(unmapped_page != pgsize); + } + iova += pgsize; + unmapped_len += pgsize; } mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, @@ -281,6 +283,105 @@ static const struct iommu_gather_ops mmu_tlb_ops = { .tlb_sync = mmu_tlb_sync_context, }; +static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) +{ + struct drm_mm_node *node; + u64 offset = addr >> PAGE_SHIFT; + + drm_mm_for_each_node(node, &pfdev->mm) { + if (offset >= node->start && offset < (node->start + node->size)) + return node; + } + return NULL; +} + +#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) + +int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) +{ + int ret, i; + struct drm_mm_node *node; + struct panfrost_gem_object *bo; + struct address_space *mapping; + pgoff_t page_offset; + struct sg_table *sgt; + struct page **pages; + + node = addr_to_drm_mm_node(pfdev, as, addr); + if (!node) + return -ENOENT; + + bo = drm_mm_node_to_panfrost_bo(node); + if (!bo->is_heap) { + dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", + node->start << PAGE_SHIFT); + return -EINVAL; + } + /* Assume 2MB alignment and size multiple */ + addr &= ~((u64)SZ_2M - 1); + page_offset = addr >> PAGE_SHIFT; + page_offset -= node->start; + + mutex_lock(&bo->base.pages_lock); + + if (!bo->base.pages) { + bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, + sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); + if (!bo->sgts) + return -ENOMEM; + + pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, + sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); + if (!pages) { + kfree(bo->sgts); + bo->sgts = NULL; + return -ENOMEM; + } + bo->base.pages = pages; + bo->base.pages_use_count = 1; + } else + pages = bo->base.pages; + + mapping = bo->base.base.filp->f_mapping; + mapping_set_unevictable(mapping); + + for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { + pages[i] = shmem_read_mapping_page(mapping, i); + if (IS_ERR(pages[i])) { + mutex_unlock(&bo->base.pages_lock); + ret = PTR_ERR(pages[i]); + goto err_pages; + } + } + + mutex_unlock(&bo->base.pages_lock); + + sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; + ret = sg_alloc_table_from_pages(sgt, pages + page_offset, + NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); + if (ret) + goto 
err_pages; + + if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) { + ret = -EINVAL; + goto err_map; + } + + mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + + bo->is_mapped = true; + + dev_dbg(pfdev->dev, "mapped page fault @ %llx", addr); + + return 0; + +err_map: + sg_free_table(sgt); +err_pages: + drm_gem_shmem_put_pages(&bo->base); + return ret; +} + static const char *access_type_name(struct panfrost_device *pfdev, u32 fault_status) { @@ -317,9 +418,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) { struct panfrost_device *pfdev = data; u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT); - int i; - - dev_err(pfdev->dev, "mmu irq status=%x\n", status); + int i, ret; for (i = 0; status; i++) { u32 mask = BIT(i) | BIT(i + 16); @@ -341,6 +440,18 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) access_type = (fault_status >> 8) & 0x3; source_id = (fault_status >> 16); + /* Page fault only */ + if ((status & mask) == BIT(i)) { + WARN_ON(exception_type < 0xC1 || exception_type > 0xC4); + + ret = panfrost_mmu_map_fault_addr(pfdev, i, addr); + if (!ret) { + mmu_write(pfdev, MMU_INT_CLEAR, BIT(i)); + status &= ~mask; + continue; + } + } + /* terminal fault, print info about the fault */ dev_err(pfdev->dev, "Unhandled Page fault in AS%d at VA 0x%016llX\n" -- cgit From 3efdf83ca0f9d3149f8c2201dad86a74fd952f91 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 14 Aug 2019 04:48:14 +0000 Subject: drm/panfrost: Fix missing unlock on error in panfrost_mmu_map_fault_addr() Add the missing unlock before return from function panfrost_mmu_map_fault_addr() in the error handling case. Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations") Signed-off-by: Wei Yongjun Reviewed-by: Steven Price Signed-off-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20190814044814.102294-1-weiyongjun1@huawei.com --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 2ed411f09d80..06f1a563e940 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -327,14 +327,17 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) if (!bo->base.pages) { bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); - if (!bo->sgts) + if (!bo->sgts) { + mutex_unlock(&bo->base.pages_lock); return -ENOMEM; + } pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); if (!pages) { kfree(bo->sgts); bo->sgts = NULL; + mutex_unlock(&bo->base.pages_lock); return -ENOMEM; } bo->base.pages = pages; -- cgit From 7282f7645d06bf0afe0a3c11ab92d9392528b819 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 13 Aug 2019 09:01:15 -0600 Subject: drm/panfrost: Implement per FD address spaces Up until now, a single shared GPU address space was used. This is not ideal as there's no protection between processes and doesn't work for supporting the same GPU/CPU VA feature. Most importantly, this will hopefully mitigate Alyssa's fear of WebGL, whatever that is. Most of the changes here are moving struct drm_mm and struct panfrost_mmu objects from the per device struct to the per FD struct. 
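On the submit side (panfrost_job.c, not part of this log; the call sites below are a sketch), each job pins its FD's address space while it runs:

    /* at submit: returns the h/w AS number, bumps the in-use count */
    cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
    /* ... program the job slot with this AS and kick the job ... */

    /* on completion: drop the in-use count; the AS stays assigned
     * to the FD until another FD needs to reclaim it */
    panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
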
The critical function is panfrost_mmu_as_get() which handles allocating and switching the h/w address spaces. There are 3 states an AS can be in: free, allocated, and in use. When a job runs, it requests an address space and then marks it not in use when the job is complete (but it stays assigned). The first time through, we find a free AS in the alloc_mask and assign the AS to the FD. Then the next time through, we most likely already have our AS and we just mark it in use with a ref count. We need a ref count because we have multiple job slots. If the job/FD doesn't have an AS assigned and there are no free ones, then we pick an allocated one not in use from our LRU list and switch the AS from the old FD to the new one. Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Cc: Robin Murphy Cc: Steven Price Signed-off-by: Rob Herring Acked-by: Alyssa Rosenzweig Reviewed-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20190813150115.30338-1-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 222 +++++++++++++++++++++++--------- 1 file changed, 160 insertions(+), 62 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 06f1a563e940..842bdd7cf6be 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Linaro, Ltd, Rob Herring */ +#include #include #include #include @@ -22,12 +23,6 @@ #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg) #define mmu_read(dev, reg) readl(dev->iomem + reg) -struct panfrost_mmu { - struct io_pgtable_cfg pgtbl_cfg; - struct io_pgtable_ops *pgtbl_ops; - struct mutex lock; -}; - static int wait_ready(struct panfrost_device *pfdev, u32 as_nr) { int ret; @@ -85,13 +80,19 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr, } -static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr, - u64 iova, size_t size, u32 op) +static int mmu_hw_do_operation(struct panfrost_device *pfdev, + struct panfrost_mmu *mmu, + u64 iova, size_t size, u32 op) { - unsigned long flags; - int ret; + int ret, as_nr; - spin_lock_irqsave(&pfdev->hwaccess_lock, flags); + spin_lock(&pfdev->as_lock); + as_nr = mmu->as; + + if (as_nr < 0) { + spin_unlock(&pfdev->as_lock); + return 0; + } if (op != AS_COMMAND_UNLOCK) lock_region(pfdev, as_nr, iova, size); @@ -102,14 +103,15 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr, /* Wait for the flush to complete */ ret = wait_ready(pfdev, as_nr); - spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags); + spin_unlock(&pfdev->as_lock); return ret; } -static void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) +static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) { - struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg; + int as_nr = mmu->as; + struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg; u64 transtab = cfg->arm_mali_lpae_cfg.transtab; u64 memattr = cfg->arm_mali_lpae_cfg.memattr; @@ -136,9 +138,75 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr) write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); } +u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) +{ + int as; + + spin_lock(&pfdev->as_lock); + + as = mmu->as; + if (as >= 0) { + int en = atomic_inc_return(&mmu->as_count); + WARN_ON(en >= NUM_JOB_SLOTS); + + list_move(&mmu->list, &pfdev->as_lru_list); + goto out; + 
} + + /* Check for a free AS */ + as = ffz(pfdev->as_alloc_mask); + if (!(BIT(as) & pfdev->features.as_present)) { + struct panfrost_mmu *lru_mmu; + + list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) { + if (!atomic_read(&lru_mmu->as_count)) + break; + } + WARN_ON(&lru_mmu->list == &pfdev->as_lru_list); + + list_del_init(&lru_mmu->list); + as = lru_mmu->as; + + WARN_ON(as < 0); + lru_mmu->as = -1; + } + + /* Assign the free or reclaimed AS to the FD */ + mmu->as = as; + set_bit(as, &pfdev->as_alloc_mask); + atomic_set(&mmu->as_count, 1); + list_add(&mmu->list, &pfdev->as_lru_list); + + dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask); + + panfrost_mmu_enable(pfdev, mmu); + +out: + spin_unlock(&pfdev->as_lock); + return as; +} + +void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) +{ + atomic_dec(&mmu->as_count); + WARN_ON(atomic_read(&mmu->as_count) < 0); +} + void panfrost_mmu_reset(struct panfrost_device *pfdev) { - panfrost_mmu_enable(pfdev, 0); + struct panfrost_mmu *mmu, *mmu_tmp; + + spin_lock(&pfdev->as_lock); + + pfdev->as_alloc_mask = 0; + + list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) { + mmu->as = -1; + atomic_set(&mmu->as_count, 0); + list_del_init(&mmu->list); + } + + spin_unlock(&pfdev->as_lock); mmu_write(pfdev, MMU_INT_CLEAR, ~0); mmu_write(pfdev, MMU_INT_MASK, ~0); @@ -152,21 +220,21 @@ static size_t get_pgsize(u64 addr, size_t size) return SZ_2M; } -static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova, - int prot, struct sg_table *sgt) +static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, + u64 iova, int prot, struct sg_table *sgt) { unsigned int count; struct scatterlist *sgl; - struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops; + struct io_pgtable_ops *ops = mmu->pgtbl_ops; u64 start_iova = iova; - mutex_lock(&pfdev->mmu->lock); + mutex_lock(&mmu->lock); for_each_sg(sgt->sgl, sgl, sgt->nents, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); - dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len); + dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len); while (len) { size_t pgsize = get_pgsize(iova | paddr, len); @@ -178,10 +246,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova, } } - mmu_hw_do_operation(pfdev, 0, start_iova, iova - start_iova, + mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova, AS_COMMAND_FLUSH_PT); - mutex_unlock(&pfdev->mmu->lock); + mutex_unlock(&mmu->lock); return 0; } @@ -208,7 +276,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (ret < 0) return ret; - mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt); + mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt); pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); @@ -221,7 +289,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) { struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); - struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops; + struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops; u64 iova = bo->node.start << PAGE_SHIFT; size_t len = bo->node.size << PAGE_SHIFT; size_t unmapped_len = 0; @@ -230,13 +298,13 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) if (WARN_ON(!bo->is_mapped)) return; - dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len); + dev_dbg(pfdev->dev, "unmap: as=%d, 
iova=%llx, len=%zx", bo->mmu->as, iova, len); ret = pm_runtime_get_sync(pfdev->dev); if (ret < 0) return; - mutex_lock(&pfdev->mmu->lock); + mutex_lock(&bo->mmu->lock); while (unmapped_len < len) { size_t unmapped_page; @@ -250,10 +318,10 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) unmapped_len += pgsize; } - mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT, + mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT); - mutex_unlock(&pfdev->mmu->lock); + mutex_unlock(&bo->mmu->lock); pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); @@ -262,9 +330,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) static void mmu_tlb_inv_context_s1(void *cookie) { - struct panfrost_device *pfdev = cookie; + struct panfrost_file_priv *priv = cookie; - mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM); + mmu_hw_do_operation(priv->pfdev, &priv->mmu, 0, ~0UL, AS_COMMAND_FLUSH_MEM); } static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size, @@ -283,16 +351,69 @@ static const struct iommu_gather_ops mmu_tlb_ops = { .tlb_sync = mmu_tlb_sync_context, }; +int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) +{ + struct panfrost_mmu *mmu = &priv->mmu; + struct panfrost_device *pfdev = priv->pfdev; + + mutex_init(&mmu->lock); + INIT_LIST_HEAD(&mmu->list); + mmu->as = -1; + + mmu->pgtbl_cfg = (struct io_pgtable_cfg) { + .pgsize_bitmap = SZ_4K | SZ_2M, + .ias = FIELD_GET(0xff, pfdev->features.mmu_features), + .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), + .tlb = &mmu_tlb_ops, + .iommu_dev = pfdev->dev, + }; + + mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg, + priv); + if (!mmu->pgtbl_ops) + return -EINVAL; + + return 0; +} + +void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) +{ + struct panfrost_device *pfdev = priv->pfdev; + struct panfrost_mmu *mmu = &priv->mmu; + + spin_lock(&pfdev->as_lock); + if (mmu->as >= 0) { + clear_bit(mmu->as, &pfdev->as_alloc_mask); + clear_bit(mmu->as, &pfdev->as_in_use_mask); + list_del(&mmu->list); + } + spin_unlock(&pfdev->as_lock); + + free_io_pgtable_ops(mmu->pgtbl_ops); +} + static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) { - struct drm_mm_node *node; + struct drm_mm_node *node = NULL; u64 offset = addr >> PAGE_SHIFT; + struct panfrost_mmu *mmu; - drm_mm_for_each_node(node, &pfdev->mm) { - if (offset >= node->start && offset < (node->start + node->size)) - return node; + spin_lock(&pfdev->as_lock); + list_for_each_entry(mmu, &pfdev->as_lru_list, list) { + struct panfrost_file_priv *priv; + if (as != mmu->as) + continue; + + priv = container_of(mmu, struct panfrost_file_priv, mmu); + drm_mm_for_each_node(node, &priv->mm) { + if (offset >= node->start && offset < (node->start + node->size)) + goto out; + } } - return NULL; + +out: + spin_unlock(&pfdev->as_lock); + return node; } #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) @@ -317,6 +438,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) node->start << PAGE_SHIFT); return -EINVAL; } + WARN_ON(bo->mmu->as != as); + /* Assume 2MB alignment and size multiple */ addr &= ~((u64)SZ_2M - 1); page_offset = addr >> PAGE_SHIFT; @@ -370,11 +493,11 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) goto err_map; } - mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | 
IOMMU_READ | IOMMU_NOEXEC, sgt); bo->is_mapped = true; - dev_dbg(pfdev->dev, "mapped page fault @ %llx", addr); + dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); return 0; @@ -483,15 +606,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) int panfrost_mmu_init(struct panfrost_device *pfdev) { - struct io_pgtable_ops *pgtbl_ops; int err, irq; - pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL); - if (!pfdev->mmu) - return -ENOMEM; - - mutex_init(&pfdev->mmu->lock); - irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); if (irq <= 0) return -ENODEV; @@ -504,22 +620,6 @@ int panfrost_mmu_init(struct panfrost_device *pfdev) dev_err(pfdev->dev, "failed to request mmu irq"); return err; } - pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) { - .pgsize_bitmap = SZ_4K | SZ_2M, - .ias = FIELD_GET(0xff, pfdev->features.mmu_features), - .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), - .tlb = &mmu_tlb_ops, - .iommu_dev = pfdev->dev, - }; - - pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg, - pfdev); - if (!pgtbl_ops) - return -ENOMEM; - - pfdev->mmu->pgtbl_ops = pgtbl_ops; - - panfrost_mmu_enable(pfdev, 0); return 0; } @@ -528,6 +628,4 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev) { mmu_write(pfdev, MMU_INT_MASK, 0); mmu_disable(pfdev, 0); - - free_io_pgtable_ops(pfdev->mmu->pgtbl_ops); } -- cgit From e316f08f1abf5f1116118e25dce7bc3e9ab03246 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 26 Aug 2019 17:33:12 -0500 Subject: drm/panfrost: Remove unnecessary mmu->lock mutex There's no need to serialize io-pgtable calls and the as_lock is sufficient to serialize flush operations, so we can remove the per page table lock. Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces") Suggested-by: Robin Murphy Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Rob Herring Acked-by: Alyssa Rosenzweig Reviewed-by: Robin Murphy Reviewed-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20190826223317.28509-4-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 842bdd7cf6be..3a8bcfa7e7b6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -228,8 +228,6 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, struct io_pgtable_ops *ops = mmu->pgtbl_ops; u64 start_iova = iova; - mutex_lock(&mmu->lock); - for_each_sg(sgt->sgl, sgl, sgt->nents, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); @@ -249,8 +247,6 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova, AS_COMMAND_FLUSH_PT); - mutex_unlock(&mmu->lock); - return 0; } @@ -304,8 +300,6 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) if (ret < 0) return; - mutex_lock(&bo->mmu->lock); - while (unmapped_len < len) { size_t unmapped_page; size_t pgsize = get_pgsize(iova, len - unmapped_len); @@ -321,8 +315,6 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT); - mutex_unlock(&bo->mmu->lock); - pm_runtime_mark_last_busy(pfdev->dev); 
pm_runtime_put_autosuspend(pfdev->dev); bo->is_mapped = false; @@ -356,7 +348,6 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) struct panfrost_mmu *mmu = &priv->mmu; struct panfrost_device *pfdev = priv->pfdev; - mutex_init(&mmu->lock); INIT_LIST_HEAD(&mmu->list); mmu->as = -1; -- cgit From ec7eba47da867e1dd0ff7d442a8717d70c064658 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 26 Aug 2019 17:33:13 -0500 Subject: drm/panfrost: Rework page table flushing and runtime PM interaction There is no point in resuming the h/w just to do flush operations and doing so takes several locks which cause lockdep issues with the shrinker. Rework the flush operations to only happen when the h/w is already awake. This avoids taking any locks associated with resuming which trigger lockdep warnings. Fixes: 013b65101315 ("drm/panfrost: Add madvise and shrinker support") Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Rob Herring Acked-by: Alyssa Rosenzweig Reviewed-by: Robin Murphy Reviewed-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20190826223317.28509-5-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 38 ++++++++++++++++----------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 3a8bcfa7e7b6..2204e60f7808 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -220,6 +220,22 @@ static size_t get_pgsize(u64 addr, size_t size) return SZ_2M; } +void panfrost_mmu_flush_range(struct panfrost_device *pfdev, + struct panfrost_mmu *mmu, + u64 iova, size_t size) +{ + if (mmu->as < 0) + return; + + pm_runtime_get_noresume(pfdev->dev); + + /* Flush the PTs only if we're already awake */ + if (pm_runtime_active(pfdev->dev)) + mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); + + pm_runtime_put_sync_autosuspend(pfdev->dev); +} + static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, u64 iova, int prot, struct sg_table *sgt) { @@ -244,8 +260,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, } } - mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova, - AS_COMMAND_FLUSH_PT); + panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova); return 0; } @@ -255,7 +270,6 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; - int ret; int prot = IOMMU_READ | IOMMU_WRITE; if (WARN_ON(bo->is_mapped)) @@ -268,14 +282,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - ret = pm_runtime_get_sync(pfdev->dev); - if (ret < 0) - return ret; - mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt); - - pm_runtime_mark_last_busy(pfdev->dev); - pm_runtime_put_autosuspend(pfdev->dev); bo->is_mapped = true; return 0; @@ -289,17 +296,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) u64 iova = bo->node.start << PAGE_SHIFT; size_t len = bo->node.size << PAGE_SHIFT; size_t unmapped_len = 0; - int ret; if (WARN_ON(!bo->is_mapped)) return; dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len); - ret = pm_runtime_get_sync(pfdev->dev); - if (ret < 0) - return; - while (unmapped_len < len) { size_t unmapped_page; size_t pgsize = 
get_pgsize(iova, len - unmapped_len); @@ -312,11 +314,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) unmapped_len += pgsize; } - mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, - bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT); - - pm_runtime_mark_last_busy(pfdev->dev); - pm_runtime_put_autosuspend(pfdev->dev); + panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len); bo->is_mapped = false; } -- cgit From 86df65f39b009e00cf2826eac72c5a969b878065 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 26 Aug 2019 17:33:14 -0500 Subject: drm/panfrost: Split mmu_hw_do_operation into locked and unlocked version In preparation to call mmu_hw_do_operation with the as_lock already held, add a mmu_hw_do_operation_locked function. Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces") Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Rob Herring Acked-by: Alyssa Rosenzweig Reviewed-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20190826223317.28509-6-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 2204e60f7808..3407b00d0a3a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -80,19 +80,11 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr, } -static int mmu_hw_do_operation(struct panfrost_device *pfdev, - struct panfrost_mmu *mmu, - u64 iova, size_t size, u32 op) +static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr, + u64 iova, size_t size, u32 op) { - int ret, as_nr; - - spin_lock(&pfdev->as_lock); - as_nr = mmu->as; - - if (as_nr < 0) { - spin_unlock(&pfdev->as_lock); + if (as_nr < 0) return 0; - } if (op != AS_COMMAND_UNLOCK) lock_region(pfdev, as_nr, iova, size); @@ -101,10 +93,18 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, write_cmd(pfdev, as_nr, op); /* Wait for the flush to complete */ - ret = wait_ready(pfdev, as_nr); + return wait_ready(pfdev, as_nr); +} - spin_unlock(&pfdev->as_lock); +static int mmu_hw_do_operation(struct panfrost_device *pfdev, + struct panfrost_mmu *mmu, + u64 iova, size_t size, u32 op) +{ + int ret; + spin_lock(&pfdev->as_lock); + ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op); + spin_unlock(&pfdev->as_lock); return ret; } -- cgit From 5924d40958dfc2b8996fbf788a9d58e411a6db71 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 26 Aug 2019 17:33:15 -0500 Subject: drm/panfrost: Add cache/TLB flush before switching address space It's not entirely clear if this is required, but add a flush of GPU caches and TLBs before we change an address space to new page tables. 
Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces") Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Rob Herring Acked-by: Alyssa Rosenzweig Reviewed-by: Robin Murphy Reviewed-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20190826223317.28509-7-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 3407b00d0a3a..d1ebde3327fe 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -115,6 +115,8 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m u64 transtab = cfg->arm_mali_lpae_cfg.transtab; u64 memattr = cfg->arm_mali_lpae_cfg.memattr; + mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM); + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL); mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32); -- cgit From 62f1089f3cbe7d99ced92bf96a8158813b75e5e8 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 26 Aug 2019 17:33:16 -0500 Subject: drm/panfrost: Flush and disable address space when freeing page tables Currently, page tables are freed without disabling the address space first. This probably is fine as we'll switch to new page tables when the address space is allocated again and runtime PM suspend will reset the GPU, clearing the registers. However, it's better to clean up after ourselves. There is also a problem that we could be accessing the h/w in tlb_inv_context() when suspended. Rework the disable code to make sure we flush caches/TLBs and disable the address space before freeing the page tables if we are not suspended. As the tlb_inv_context() hook is only called when freeing the page tables and we do a flush before disabling the AS, let's remove the flush from tlb_inv_context and avoid any runtime PM issues. 
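Distilled, the guard added here (and used by panfrost_mmu_flush_range() in an earlier patch of this series) is the usual runtime-PM idiom:

    pm_runtime_get_noresume(pfdev->dev);    /* take a usage ref, don't wake */
    if (pm_runtime_active(pfdev->dev))
            panfrost_mmu_disable(pfdev, mmu->as);   /* awake: safe to poke */
    pm_runtime_put_autosuspend(pfdev->dev);

If the device is suspended the registers are left untouched; runtime resume resets the GPU anyway, so no stale AS setup can survive.
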
Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces") Cc: Tomeu Vizoso Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Rob Herring Acked-by: Alyssa Rosenzweig Reviewed-by: Robin Murphy Reviewed-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20190826223317.28509-8-robh@kernel.org --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/panfrost/panfrost_mmu.c') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index d1ebde3327fe..387d830cb7cf 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -129,8 +129,10 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); } -static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr) +static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr) { + mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM); + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0); mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0); @@ -321,11 +323,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) } static void mmu_tlb_inv_context_s1(void *cookie) -{ - struct panfrost_file_priv *priv = cookie; - - mmu_hw_do_operation(priv->pfdev, &priv->mmu, 0, ~0UL, AS_COMMAND_FLUSH_MEM); -} +{} static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) @@ -374,6 +372,11 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) spin_lock(&pfdev->as_lock); if (mmu->as >= 0) { + pm_runtime_get_noresume(pfdev->dev); + if (pm_runtime_active(pfdev->dev)) + panfrost_mmu_disable(pfdev, mmu->as); + pm_runtime_put_autosuspend(pfdev->dev); + clear_bit(mmu->as, &pfdev->as_alloc_mask); clear_bit(mmu->as, &pfdev->as_in_use_mask); list_del(&mmu->list); @@ -618,5 +621,4 @@ int panfrost_mmu_init(struct panfrost_device *pfdev) void panfrost_mmu_fini(struct panfrost_device *pfdev) { mmu_write(pfdev, MMU_INT_MASK, 0); - mmu_disable(pfdev, 0); } -- cgit