Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c')
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 392 ++++++++++++++-----------
 1 file changed, 208 insertions(+), 184 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index cbe447ac5974..d664ae29ae20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -1,30 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2016 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2016-2018 Etnaviv Project
  */
 
-#include <linux/iommu.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/vmalloc.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
 #include "state.xml.h"
 #include "state_hi.xml.h"
 
@@ -39,51 +27,101 @@
 
 #define MMUv2_MAX_STLB_ENTRIES		1024
 
-struct etnaviv_iommuv2_domain {
-	struct iommu_domain domain;
-	struct device *dev;
-	void *bad_page_cpu;
-	dma_addr_t bad_page_dma;
+struct etnaviv_iommuv2_context {
+	struct etnaviv_iommu_context base;
+	unsigned short id;
 	/* M(aster) TLB aka first level pagetable */
 	u32 *mtlb_cpu;
 	dma_addr_t mtlb_dma;
 	/* S(lave) TLB aka second level pagetable */
-	u32 *stlb_cpu[1024];
-	dma_addr_t stlb_dma[1024];
+	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
+	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
 };
 
-static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv2_context *
+to_v2_context(struct etnaviv_iommu_context *context)
 {
-	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+	return container_of(context, struct etnaviv_iommuv2_context, base);
 }
 
-static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
-			       phys_addr_t paddr, size_t size, int prot)
+static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	int mtlb_entry, stlb_entry;
-	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	int i;
+
+	drm_mm_takedown(&context->mm);
+
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+		if (v2_context->stlb_cpu[i])
+			dma_free_wc(context->global->dev, SZ_4K,
+				    v2_context->stlb_cpu[i],
+				    v2_context->stlb_dma[i]);
+	}
+
+	dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
+		    v2_context->mtlb_dma);
+
+	clear_bit(v2_context->id, context->global->v2.pta_alloc);
+
+	vfree(v2_context);
+}
+static int
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
+			    int stlb)
+{
+	if (v2_context->stlb_cpu[stlb])
+		return 0;
+
+	v2_context->stlb_cpu[stlb] =
+		dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
+			     &v2_context->stlb_dma[stlb],
+			     GFP_KERNEL);
+
+	if (!v2_context->stlb_cpu[stlb])
+		return -ENOMEM;
+
+	memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+		 SZ_4K / sizeof(u32));
+
+	v2_context->mtlb_cpu[stlb] =
+		v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
+
+	return 0;
+}
+
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
+			       unsigned long iova, phys_addr_t paddr,
+			       size_t size, int prot)
+{
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	int mtlb_entry, stlb_entry, ret;
+	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
 
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	if (prot & IOMMU_WRITE)
+	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+		entry |= (upper_32_bits(paddr) & 0xff) << 4;
+
+	if (prot & ETNAVIV_PROT_WRITE)
 		entry |= MMUv2_PTE_WRITEABLE;
 
 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
 	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
 
-	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+	ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
+	if (ret)
+		return ret;
+
+	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
 
 	return 0;
 }
 
-static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
-	unsigned long iova, size_t size)
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
+				    unsigned long iova, size_t size)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
 	int mtlb_entry, stlb_entry;
 
 	if (size != SZ_4K)
@@ -97,194 +135,180 @@ static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
 
 	return SZ_4K;
 }
 
-static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
-	dma_addr_t iova)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	int mtlb_entry, stlb_entry;
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	size_t dump_size = SZ_4K;
+	int i;
 
-	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
-	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+			dump_size += SZ_4K;
 
-	return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
+	return dump_size;
 }
 
-static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
 {
-	u32 *p;
-	int ret, i, j;
-
-	/* allocate scratch page */
-	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->bad_page_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->bad_page_cpu) {
-		ret = -ENOMEM;
-		goto fail_mem;
-	}
-	p = etnaviv_domain->bad_page_cpu;
-	for (i = 0; i < SZ_4K / 4; i++)
-		*p++ = 0xdead55aa;
-
-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->mtlb_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->mtlb_cpu) {
-		ret = -ENOMEM;
-		goto fail_mem;
-	}
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	int i;
 
-	/* pre-populate STLB pages (may want to switch to on-demand later) */
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->dev,
-						   SZ_4K,
-						   &etnaviv_domain->stlb_dma[i],
-						   GFP_KERNEL);
-		if (!etnaviv_domain->stlb_cpu[i]) {
-			ret = -ENOMEM;
-			goto fail_mem;
+	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
+	buf += SZ_4K;
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
+			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+			buf += SZ_4K;
 		}
-		p = etnaviv_domain->stlb_cpu[i];
-		for (j = 0; j < SZ_4K / 4; j++)
-			*p++ = MMUv2_PTE_EXCEPTION;
-
-		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
-					      MMUv2_PTE_PRESENT;
-	}
+}
 
-	return 0;
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *context)
+{
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	u16 prefetch;
 
-fail_mem:
-	if (etnaviv_domain->bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->bad_page_cpu,
-				  etnaviv_domain->bad_page_dma);
+	/* If the MMU is already enabled the state is still there. */
+	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+		return;
 
-	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->mtlb_cpu,
-				  etnaviv_domain->mtlb_dma);
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
 
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
-	}
+	prefetch = etnaviv_buffer_config_mmuv2(gpu,
+				(u32)v2_context->mtlb_dma,
+				(u32)context->global->bad_page_dma);
+	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
+			     prefetch);
+	etnaviv_gpu_wait_idle(gpu, 100);
 
-	return ret;
+	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 }
 
-static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	int i;
-
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->bad_page_cpu,
-			  etnaviv_domain->bad_page_dma);
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+	u16 prefetch;
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->mtlb_cpu,
-			  etnaviv_domain->mtlb_dma);
+	/* If the MMU is already enabled the state is still there. */
+	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+		return;
 
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
-	}
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
+	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+		  lower_32_bits(context->global->v2.pta_dma));
+	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+		  upper_32_bits(context->global->v2.pta_dma));
+	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
+
+	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
+		  lower_32_bits(context->global->bad_page_dma));
+	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
+		  lower_32_bits(context->global->bad_page_dma));
+	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
+		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
+		  upper_32_bits(context->global->bad_page_dma)) |
+		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
+		  upper_32_bits(context->global->bad_page_dma)));
+
+	context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+						      VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+
+	/* trigger a PTA load through the FE */
+	prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
+	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
+			     prefetch);
+	etnaviv_gpu_wait_idle(gpu, 100);
 
-	vfree(etnaviv_domain);
+	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
 }
 
-static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	size_t dump_size = SZ_4K;
-	int i;
-
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
-		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
-			dump_size += SZ_4K;
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 
-	return dump_size;
+	return v2_context->mtlb_dma;
 }
 
-static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	int i;
+	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
 
-	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
-	buf += SZ_4K;
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
-			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+	return v2_context->id;
+}
+static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
+				    struct etnaviv_iommu_context *context)
+{
+	switch (gpu->sec_mode) {
+	case ETNA_SEC_NONE:
+		etnaviv_iommuv2_restore_nonsec(gpu, context);
+		break;
+	case ETNA_SEC_KERNEL:
+		etnaviv_iommuv2_restore_sec(gpu, context);
+		break;
+	default:
+		WARN(1, "unhandled GPU security mode\n");
+		break;
+	}
 }
 
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
-	.ops = {
-		.domain_free = etnaviv_iommuv2_domain_free,
-		.map = etnaviv_iommuv2_map,
-		.unmap = etnaviv_iommuv2_unmap,
-		.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
-		.pgsize_bitmap = SZ_4K,
-	},
+const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
+	.free = etnaviv_iommuv2_free,
+	.map = etnaviv_iommuv2_map,
+	.unmap = etnaviv_iommuv2_unmap,
 	.dump_size = etnaviv_iommuv2_dump_size,
 	.dump = etnaviv_iommuv2_dump,
+	.restore = etnaviv_iommuv2_restore,
 };
 
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
 {
-	struct etnaviv_iommuv2_domain *etnaviv_domain =
-			to_etnaviv_domain(gpu->mmu->domain);
-	u16 prefetch;
-
-	/* If the MMU is already enabled the state is still there. */
-	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
-		return;
+	struct etnaviv_iommuv2_context *v2_context;
+	struct etnaviv_iommu_context *context;
 
-	prefetch = etnaviv_buffer_config_mmuv2(gpu,
-				(u32)etnaviv_domain->mtlb_dma,
-				(u32)etnaviv_domain->bad_page_dma);
-	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
-			     prefetch);
-	etnaviv_gpu_wait_idle(gpu, 100);
+	v2_context = vzalloc(sizeof(*v2_context));
+	if (!v2_context)
+		return NULL;
 
-	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
-}
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
-{
-	struct etnaviv_iommuv2_domain *etnaviv_domain;
-	int ret;
+	mutex_lock(&global->lock);
+	v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
+					     ETNAVIV_PTA_ENTRIES);
+	if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
+		set_bit(v2_context->id, global->v2.pta_alloc);
+	} else {
+		mutex_unlock(&global->lock);
+		goto out_free;
+	}
+	mutex_unlock(&global->lock);
 
-	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
-	if (!etnaviv_domain)
-		return NULL;
+	v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
+					    &v2_context->mtlb_dma, GFP_KERNEL);
+	if (!v2_context->mtlb_cpu)
+		goto out_free_id;
 
-	etnaviv_domain->dev = gpu->dev;
+	memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+		 MMUv2_MAX_STLB_ENTRIES);
 
-	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
-	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
-	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
-	etnaviv_domain->domain.geometry.aperture_start = 0;
-	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+	global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;
 
-	ret = etnaviv_iommuv2_init(etnaviv_domain);
-	if (ret)
-		goto out_free;
+	context = &v2_context->base;
+	context->global = global;
+	kref_init(&context->refcount);
+	mutex_init(&context->lock);
+	INIT_LIST_HEAD(&context->mappings);
+	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
 
-	return &etnaviv_domain->domain;
+	return context;
 
+out_free_id:
+	clear_bit(v2_context->id, global->v2.pta_alloc);
 out_free:
-	vfree(etnaviv_domain);
+	vfree(v2_context);
 
 	return NULL;
 }
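The rewritten map path above allocates slave pagetables on demand and packs physical addresses above 4 GiB into the PTE (the `(upper_32_bits(paddr) & 0xff) << 4` line). As a reference for the address split, here is a minimal stand-alone C sketch; the mask and shift values are assumptions taken from the unchanged defines near the top of etnaviv_iommu_v2.c, since only MMUv2_MAX_STLB_ENTRIES (1024) and the 4 KiB page size are visible in the hunks above:

#include <stdint.h>
#include <stdio.h>

/* Assumed values, not shown in this diff: bits 31:22 of the GPU virtual
 * address index the master TLB, bits 21:12 the slave TLB, and bits 11:0
 * are the offset into the 4 KiB page. */
#define MMUv2_PTE_PRESENT   (1u << 0)
#define MMUv2_MTLB_SHIFT    22
#define MMUv2_MTLB_MASK     0xffc00000u
#define MMUv2_STLB_SHIFT    12
#define MMUv2_STLB_MASK     0x003ff000u

int main(void)
{
	uint32_t iova  = 0x40021000u;    /* example GPU virtual address */
	uint64_t paddr = 0x123456000ull; /* example 36-bit physical page */

	/* Same index computation as etnaviv_iommuv2_map() above. */
	uint32_t mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	uint32_t stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	/* PTE as built above: low 32 bits of the page address, with
	 * physical address bits 39:32 folded into PTE bits 11:4. */
	uint32_t entry = (uint32_t)paddr | MMUv2_PTE_PRESENT;
	entry |= ((uint32_t)(paddr >> 32) & 0xff) << 4;

	printf("mtlb %u, stlb %u, pte 0x%08x\n", mtlb_entry, stlb_entry, entry);
	return 0;
}

With 4 KiB pages and 1024 entries per slave table, each MTLB entry spans 4 MiB, and the 1024 MTLB entries cover the 4 GiB aperture that etnaviv_iommuv2_context_alloc() hands to drm_mm_init().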
