Diffstat (limited to 'drivers/gpu/drm/msm/adreno')
20 files changed, 415 insertions(+), 336 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 379a3d346c30..ec38db45d8a3 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
 	uint32_t *ptr, len;
 	int i, ret;
 
-	a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+	a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error);
 
 	DBG("%s", gpu->name);
 
@@ -466,19 +466,18 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
 	return state;
 }
 
-static struct msm_gem_address_space *
-a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
 {
 	struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
-	struct msm_gem_address_space *aspace;
+	struct drm_gpuvm *vm;
 
-	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-		0xfff * SZ_64K);
+	vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
 
-	if (IS_ERR(aspace) && !IS_ERR(mmu))
+	if (IS_ERR(vm) && !IS_ERR(mmu))
 		mmu->funcs->destroy(mmu);
 
-	return aspace;
+	return vm;
 }
 
 static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -504,7 +503,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
 		.gpu_state_get = a2xx_gpu_state_get,
 		.gpu_state_put = adreno_gpu_state_put,
-		.create_address_space = a2xx_create_address_space,
+		.create_vm = a2xx_create_vm,
 		.get_rptr = a2xx_get_rptr,
 	},
 };
@@ -551,14 +550,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
 	else
 		adreno_gpu->registers = a220_registers;
 
-	if (!gpu->aspace) {
-		dev_err(dev->dev, "No memory protection without MMU\n");
-		if (!allow_vram_carveout) {
-			ret = -ENXIO;
-			goto fail;
-		}
-	}
-
 	return gpu;
 
 fail:
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 39641551eeb6..0407c9bc8c1b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
 }
 
 static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, size_t len, int prot)
+		struct sg_table *sgt, size_t off, size_t len,
+		int prot)
 {
 	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
 	struct sg_dma_page_iter dma_iter;
 	unsigned prot_bits = 0;
 
+	WARN_ON(off != 0);
+
 	if (prot & IOMMU_WRITE)
 		prot_bits |= 1;
 	if (prot & IOMMU_READ)
@@ -71,10 +74,6 @@ static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 	return 0;
 }
 
-static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
-{
-}
-
static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
 {
 	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
@@ -90,7 +89,6 @@ static const struct msm_mmu_funcs funcs = {
 	.map = a2xx_gpummu_map,
 	.unmap = a2xx_gpummu_unmap,
 	.destroy = a2xx_gpummu_destroy,
-	.resume_translation = a2xx_gpummu_resume_translation,
 };
 
 struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
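[The a2xx GPUMMU is a flat, one-level page table: a2xx_gpummu_map() above turns an iova into a slot index and ORs protection bits into each entry. A minimal standalone model of that math follows; the GPUMMU_VA_START and GPUMMU_PAGE_SIZE values and the paddr-plus-prot entry layout are not shown in these hunks and are assumptions here, only the index arithmetic and the prot_bits mapping are taken verbatim from the hunk.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
/* Assumed values; the real constants live in a2xx_gpummu.c: */
#define GPUMMU_VA_START		(16 << 20)	/* SZ_16M, matching the VM base above */
#define GPUMMU_PAGE_SIZE	4096

/* Model of the per-page entry computed in a2xx_gpummu_map(). */
static uint32_t gpummu_entry(uint64_t iova, uint32_t paddr, int prot,
			     unsigned *idx)
{
	unsigned prot_bits = 0;

	if (prot & IOMMU_WRITE)
		prot_bits |= 1;
	if (prot & IOMMU_READ)
		prot_bits |= 2;

	*idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	return paddr | prot_bits;	/* entry layout assumed, not shown in hunk */
}

int main(void)
{
	unsigned idx;
	uint32_t e = gpummu_entry(GPUMMU_VA_START + 3 * GPUMMU_PAGE_SIZE,
				  0x80000000u, IOMMU_READ | IOMMU_WRITE, &idx);
	assert(idx == 3);
	printf("slot %u -> entry %#x\n", idx, e);
	return 0;
}

[Note how the new off parameter is rejected with WARN_ON(off != 0): this MMU cannot map at an offset into an sg_table, so the new callback signature is accepted but the capability is not implemented.]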
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b6df115bb567..a956cd79195e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -526,7 +526,7 @@ static const struct adreno_gpu_funcs funcs = {
 		.gpu_busy = a3xx_gpu_busy,
 		.gpu_state_get = a3xx_gpu_state_get,
 		.gpu_state_put = adreno_gpu_state_put,
-		.create_address_space = adreno_create_address_space,
+		.create_vm = adreno_create_vm,
 		.get_rptr = a3xx_get_rptr,
 	},
 };
@@ -581,21 +581,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	if (!gpu->aspace) {
-		/* TODO we think it is possible to configure the GPU to
-		 * restrict access to VRAM carveout. But the required
-		 * registers are unknown. For now just bail out and
-		 * limp along with just modesetting. If it turns out
-		 * to not be possible to restrict access, then we must
-		 * implement a cmdstream validator.
-		 */
-		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-		if (!allow_vram_carveout) {
-			ret = -ENXIO;
-			goto fail;
-		}
-	}
-
 	icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
 	if (IS_ERR(icc_path)) {
 		ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index f1b18a6663f7..83f6329accba 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -645,7 +645,7 @@ static const struct adreno_gpu_funcs funcs = {
 		.gpu_busy = a4xx_gpu_busy,
 		.gpu_state_get = a4xx_gpu_state_get,
 		.gpu_state_put = adreno_gpu_state_put,
-		.create_address_space = adreno_create_address_space,
+		.create_vm = adreno_create_vm,
 		.get_rptr = a4xx_get_rptr,
 	},
 	.get_timestamp = a4xx_get_timestamp,
@@ -695,21 +695,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 
 	adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
 
-	if (!gpu->aspace) {
-		/* TODO we think it is possible to configure the GPU to
-		 * restrict access to VRAM carveout. But the required
-		 * registers are unknown. For now just bail out and
-		 * limp along with just modesetting. If it turns out
-		 * to not be possible to restrict access, then we must
-		 * implement a cmdstream validator.
-		 */
-		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-		if (!allow_vram_carveout) {
-			ret = -ENXIO;
-			goto fail;
-		}
-	}
-
 	icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
 	if (IS_ERR(icc_path)) {
 		ret = PTR_ERR(icc_path);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 169b8fe688f8..625a4e787d8f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -116,13 +116,13 @@ reset_set(void *data, u64 val)
 	adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
 
 	if (a5xx_gpu->pm4_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->pm4_bo);
 		a5xx_gpu->pm4_bo = NULL;
 	}
 
 	if (a5xx_gpu->pfp_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->pfp_bo);
 		a5xx_gpu->pfp_bo = NULL;
 	}
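[The a5xx_debugfs.c hunks are a mechanical gpu->aspace to gpu->vm rename, but the teardown shape they preserve is worth spelling out: the iova pin and the GEM handle are separate references, so both must be dropped, and the pointer is cleared so a later reset cannot double-free. A sketch only, not a compile-tested unit; it assumes the msm_gem_unpin_iova()/drm_gem_object_put() signatures as used in this patch, and a5xx_drop_fw_bo is a hypothetical helper name.]

/* Sketch: mirrors the reset_set() teardown pattern above. */
static void a5xx_drop_fw_bo(struct msm_gpu *gpu, struct drm_gem_object **bo)
{
	if (*bo) {
		msm_gem_unpin_iova(*bo, gpu->vm);	/* drop the GPU VA pin */
		drm_gem_object_put(*bo);		/* drop the GEM reference */
		*bo = NULL;				/* guard against reuse */
	}
}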
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 650e5bac225f..4a04dc43a8e6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
 		ring->cur_ctx_seqno = 0;
 		a5xx_submit_in_rb(gpu, submit);
@@ -620,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu)
 		a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
 			sizeof(u32) * gpu->nr_rings,
 			MSM_BO_WC | MSM_BO_MAP_PRIV,
-			gpu->aspace, &a5xx_gpu->shadow_bo,
+			gpu->vm, &a5xx_gpu->shadow_bo,
 			&a5xx_gpu->shadow_iova);
 
 		if (IS_ERR(a5xx_gpu->shadow))
@@ -833,8 +835,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
-	BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
-	hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+	BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13);
+	hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13;
 
 	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
 	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
@@ -1040,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 	a5xx_preempt_fini(gpu);
 
 	if (a5xx_gpu->pm4_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->pm4_bo);
 	}
 
 	if (a5xx_gpu->pfp_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->pfp_bo);
 	}
 
 	if (a5xx_gpu->gpmu_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->gpmu_bo);
 	}
 
 	if (a5xx_gpu->shadow_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->shadow_bo);
 	}
 
@@ -1455,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
 		struct a5xx_crashdumper *dumper)
 {
 	dumper->ptr = msm_gem_kernel_new(gpu->dev,
-		SZ_1M, MSM_BO_WC, gpu->aspace,
+		SZ_1M, MSM_BO_WC, gpu->vm,
 		&dumper->bo, &dumper->iova);
 
 	if (!IS_ERR(dumper->ptr))
@@ -1555,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 
 	if (a5xx_crashdumper_run(gpu, &dumper)) {
 		kfree(a5xx_state->hlsqregs);
-		msm_gem_kernel_put(dumper.bo, gpu->aspace);
+		msm_gem_kernel_put(dumper.bo, gpu->vm);
 		return;
 	}
 
@@ -1563,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 	memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
 		count * sizeof(u32));
 
-	msm_gem_kernel_put(dumper.bo, gpu->aspace);
+	msm_gem_kernel_put(dumper.bo, gpu->vm);
 }
 
 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1711,7 +1713,7 @@ static const struct adreno_gpu_funcs funcs = {
 		.gpu_busy = a5xx_gpu_busy,
 		.gpu_state_get = a5xx_gpu_state_get,
 		.gpu_state_put = a5xx_gpu_state_put,
-		.create_address_space = adreno_create_address_space,
+		.create_vm = adreno_create_vm,
 		.get_rptr = a5xx_get_rptr,
 	},
 	.get_timestamp = a5xx_get_timestamp,
@@ -1754,6 +1756,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
 	struct adreno_platform_config *config = pdev->dev.platform_data;
+	const struct qcom_ubwc_cfg_data *common_cfg;
 	struct a5xx_gpu *a5xx_gpu = NULL;
 	struct adreno_gpu *adreno_gpu;
 	struct msm_gpu *gpu;
@@ -1784,21 +1787,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 		return ERR_PTR(ret);
 	}
 
-	if (gpu->aspace)
-		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+	msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+				  a5xx_fault_handler);
 
 	/* Set up the preemption specific bits and pieces for each ringbuffer */
 	a5xx_preempt_init(gpu);
 
-	/* Set the highest bank bit */
-	if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
-		adreno_gpu->ubwc_config.highest_bank_bit = 15;
-	else
-		adreno_gpu->ubwc_config.highest_bank_bit = 14;
+	/* Inherit the common config and make some necessary fixups */
+	common_cfg = qcom_ubwc_config_get_data();
+	if (IS_ERR(common_cfg))
+		return ERR_CAST(common_cfg);
 
-	/* a5xx only supports UBWC 1.0, these are not configurable */
-	adreno_gpu->ubwc_config.macrotile_mode = 0;
-	adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+	/* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+	adreno_gpu->_ubwc_config = *common_cfg;
+	adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;
 
 	adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
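[a5xx no longer hand-rolls its UBWC parameters; it copies the SoC-wide config from qcom_ubwc_config_get_data() and keeps a mutable copy for local fixups. A standalone model of that inherit-then-override flow follows. The trimmed struct and the default values in it are illustrative stand-ins, not the real qcom_ubwc_cfg_data contents; the a530/a540 HBB values (15 vs 14) come from the code this hunk removes.]

#include <stdio.h>

/* Trimmed, hypothetical stand-in for struct qcom_ubwc_cfg_data. */
struct ubwc_cfg {
	unsigned highest_bank_bit;
	unsigned ubwc_swizzle;
};

/* Platform-wide defaults, normally fetched via qcom_ubwc_config_get_data(). */
static const struct ubwc_cfg common = {
	.highest_bank_bit = 14,
	.ubwc_swizzle = 0x7,
};

int main(void)
{
	int is_a530_or_a540 = 1;	/* pretend we probed one of those */
	struct ubwc_cfg cfg = common;	/* inherit the common config */

	/* ...then apply the per-GPU fixup the removed code expressed directly. */
	if (is_a530_or_a540)
		cfg.highest_bank_bit = 15;

	printf("HBB %u, swizzle %#x\n", cfg.highest_bank_bit, cfg.ubwc_swizzle);
	return 0;
}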
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 6b91e0bd1514..d6da7351cfbb 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
 	ptr = msm_gem_kernel_new(drm, bosize,
-		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
+		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm,
 		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
 	if (IS_ERR(ptr))
 		return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index b5f9d40687d5..e4924b5e1c48 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -255,7 +255,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 
 	ptr = msm_gem_kernel_new(gpu->dev,
 		A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
-		MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+		MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
 
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
@@ -263,9 +263,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
 
 	/* The buffer to store counters needs to be unprivileged */
 	counters = msm_gem_kernel_new(gpu->dev,
 		A5XX_PREEMPT_COUNTER_SIZE,
-		MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
+		MSM_BO_WC, gpu->vm, &counters_bo, &counters_iova);
 	if (IS_ERR(counters)) {
-		msm_gem_kernel_put(bo, gpu->aspace);
+		msm_gem_kernel_put(bo, gpu->vm);
 		return PTR_ERR(counters);
 	}
 
@@ -296,8 +296,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
 	int i;
 
 	for (i = 0; i < gpu->nr_rings; i++) {
-		msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
-		msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
+		msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->vm);
+		msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->vm);
 	}
}
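[preempt_init_ring() and a5xx_preempt_fini() show the kernel-BO lifecycle this series keeps intact while swapping the VM argument: msm_gem_kernel_new() allocates a GEM object, pins it into the given VM and returns the CPU mapping plus iova, and msm_gem_kernel_put() undoes all of it against the same VM. A sketch only, not a compile-tested unit; the signatures are as used above, and alloc_record/free_record plus PREEMPT_RECORD_BYTES are made-up names for illustration.]

#define PREEMPT_RECORD_BYTES SZ_64K	/* hypothetical size */

static int alloc_record(struct msm_gpu *gpu, struct drm_gem_object **bo,
			uint64_t *iova)
{
	void *ptr = msm_gem_kernel_new(gpu->dev, PREEMPT_RECORD_BYTES,
				       MSM_BO_WC | MSM_BO_MAP_PRIV,
				       gpu->vm, bo, iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	memset(ptr, 0, PREEMPT_RECORD_BYTES);	/* CPU mapping is live here */
	return 0;
}

static void free_record(struct msm_gpu *gpu, struct drm_gem_object *bo)
{
	/* Unmaps from gpu->vm and drops the object in one call. */
	msm_gem_kernel_put(bo, gpu->vm);
}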
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 70f7ad806c34..00e1afd46b81 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1335,7 +1335,7 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
 	REG_A6XX_RB_NC_MODE_CNTL,
 	REG_A6XX_RB_CMP_DBG_ECO_CNTL,
 	REG_A7XX_GRAS_NC_MODE_CNTL,
-	REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+	REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
 	REG_A6XX_UCHE_GBIF_GX_CONFIG,
 	REG_A6XX_UCHE_CLIENT_PF,
 	REG_A6XX_TPL1_DBG_ECO_CNTL1,
@@ -1442,6 +1442,13 @@ static const struct adreno_info a7xx_gpus[] = {
 			.gmu_cgc_mode = 0x00020202,
 		},
 		.preempt_record_size = 4192 * SZ_1K,
+		.speedbins = ADRENO_SPEEDBINS(
+			{ 0, 0 },
+			{ 59, 1 },
+			{ 7, 2 },
+			{ 232, 3 },
+			{ 146, 4 },
+		),
 	}, {
 		.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
 		.family = ADRENO_7XX_GEN3,
@@ -1474,6 +1481,45 @@ static const struct adreno_info a7xx_gpus[] = {
 			},
 		},
 		.preempt_record_size = 3572 * SZ_1K,
+	}, {
+		.chip_ids = ADRENO_CHIP_IDS(0x43030c00),
+		.family = ADRENO_7XX_GEN2,
+		.fw = {
+			[ADRENO_FW_SQE] = "gen71500_sqe.fw",
+			[ADRENO_FW_GMU] = "gen71500_gmu.bin",
+		},
+		.gmem = SZ_1M + SZ_512K,
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+			ADRENO_QUIRK_HAS_HW_APRIV |
+			ADRENO_QUIRK_PREEMPTION,
+		.init = a6xx_gpu_init,
+		.a6xx = &(const struct a6xx_info) {
+			.hwcg = a740_hwcg,
+			.protect = &a730_protect,
+			.pwrup_reglist = &a7xx_pwrup_reglist,
+			.gmu_chipid = 0x70f0000,
+			.gmu_cgc_mode = 0x00020222,
+			.bcms = (const struct a6xx_bcm[]) {
+				{ .name = "SH0", .buswidth = 16 },
+				{ .name = "MC0", .buswidth = 4 },
+				{
+					.name = "ACV",
+					.fixed = true,
+					.perfmode = BIT(3),
+					.perfmode_bw = 16500000,
+				},
+				{ /* sentinel */ },
+			},
+		},
+		.preempt_record_size = 4192 * SZ_1K,
+		.speedbins = ADRENO_SPEEDBINS(
+			{ 0, 0 },
+			{ 294, 1 },
+			{ 263, 2 },
+			{ 233, 3 },
+			{ 141, 4 },
+		),
 	}
 };
 DECLARE_ADRENO_GPULIST(a7xx);
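[The new catalog entries carry ADRENO_SPEEDBINS tables that map a fuse value read from hardware to a speedbin index. A standalone model of the lookup follows, using the new 0x43030c00 entry's data; the exact-match semantics and the unknown-fuse error case are assumptions based on how adreno_read_speedbin() is used elsewhere in the driver, not something these hunks show.]

#include <stdio.h>

struct speedbin { unsigned fuse, idx; };

/* Fuse -> speedbin table from the new 0x43030c00 catalog entry above. */
static const struct speedbin bins[] = {
	{ 0, 0 }, { 294, 1 }, { 263, 2 }, { 233, 3 }, { 141, 4 },
};

/* Assumed semantics: an exact fuse match selects the bin; no match is -1. */
static int fuse_to_speedbin(unsigned fuse)
{
	for (unsigned i = 0; i < sizeof(bins) / sizeof(bins[0]); i++)
		if (bins[i].fuse == fuse)
			return (int)bins[i].idx;
	return -1;
}

int main(void)
{
	printf("fuse 233 -> bin %d\n", fuse_to_speedbin(233));	/* 3 */
	printf("fuse 999 -> bin %d\n", fuse_to_speedbin(999));	/* -1 */
	return 0;
}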
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 38c0f8ef85c3..28e6705c6da6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1259,15 +1259,17 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
 {
-	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
-	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
-	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
-	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
-	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
-	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
-
-	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
-	msm_gem_address_space_put(gmu->aspace);
+	struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
+	msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->log.obj, gmu->vm);
+
+	mmu->funcs->detach(mmu);
+	drm_gpuvm_put(gmu->vm);
 }
 
 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
@@ -1296,7 +1298,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
 	if (IS_ERR(bo->obj))
 		return PTR_ERR(bo->obj);
 
-	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
 		range_start, range_end);
 	if (ret) {
 		drm_gem_object_put(bo->obj);
@@ -1311,7 +1313,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
 	return 0;
 }
 
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
 {
 	struct msm_mmu *mmu;
 
@@ -1321,9 +1323,9 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
 	if (IS_ERR(mmu))
 		return PTR_ERR(mmu);
 
-	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
-	if (IS_ERR(gmu->aspace))
-		return PTR_ERR(gmu->aspace);
+	gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
+	if (IS_ERR(gmu->vm))
+		return PTR_ERR(gmu->vm);
 
 	return 0;
 }
@@ -1940,7 +1942,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	ret = a6xx_gmu_memory_probe(gmu);
+	ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
 	if (ret)
 		goto err_put_device;
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index b2d4489b4024..d1ce11131ba6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -62,7 +62,7 @@ struct a6xx_gmu {
 	/* For serializing communication with the GMU: */
 	struct mutex lock;
 
-	struct msm_gem_address_space *aspace;
+	struct drm_gpuvm *vm;
 
 	void __iomem *mmio;
 	void __iomem *rscc;
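[The GMU now owns a plain drm_gpuvm instead of an msm_gem_address_space. Note the teardown order in a6xx_gmu_memory_free() above: every kernel BO is released first, then the MMU is detached, then the last VM reference is dropped with drm_gpuvm_put(). A sketch of the probe side, not compile-tested; it matches the msm_gem_vm_create() call in this hunk, where the final true appears to select a kernel-managed VM judging by the other call sites in this series, and gmu_vm_probe is a hypothetical name.]

/* Sketch: mirrors a6xx_gmu_memory_probe() above. */
static int gmu_vm_probe(struct drm_device *drm, struct a6xx_gmu *gmu,
			struct msm_mmu *mmu)
{
	/* 2 GiB kernel-managed VM starting at GPU VA 0 for GMU firmware. */
	gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
	if (IS_ERR(gmu->vm))
		return PTR_ERR(gmu->vm);

	return 0;
}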
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index bf3758f010f4..45dd5fd1c2bf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -111,7 +111,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 		struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
 {
 	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
-	struct msm_file_private *ctx = submit->queue->ctx;
+	struct msm_context *ctx = submit->queue->ctx;
+	struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	phys_addr_t ttbr;
 	u32 asid;
@@ -120,7 +121,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	if (ctx->seqno == ring->cur_ctx_seqno)
 		return;
 
-	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+	if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid))
 		return;
 
 	if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
@@ -130,6 +131,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 		OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
 		OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 		OUT_RING(ring, submit->seqno - 1);
+
+		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+		OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+		/* Reset state used to synchronize BR and BV */
+		OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
+		OUT_RING(ring,
+			 CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS |
+			 CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE |
+			 CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER |
+			 CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
+
+		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+		OUT_RING(ring, CP_SET_THREAD_BR);
 	}
 
 	if (!sysprof) {
@@ -212,6 +227,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	a6xx_set_pagetable(a6xx_gpu, ring, submit);
 
 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -335,6 +352,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	/*
 	 * Toggle concurrent binning for pagetable switch and set the thread to
 	 * BR since only it can execute the pagetable switch packets.
@@ -585,117 +604,118 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
 		gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1),
 			  protect->regs[i]);
 }
 
-static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
+static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
 {
-	gpu->ubwc_config.rgb565_predicator = 0;
-	gpu->ubwc_config.uavflagprd_inv = 0;
-	gpu->ubwc_config.min_acc_len = 0;
-	gpu->ubwc_config.ubwc_swizzle = 0x6;
-	gpu->ubwc_config.macrotile_mode = 0;
-	gpu->ubwc_config.highest_bank_bit = 15;
+	const struct qcom_ubwc_cfg_data *common_cfg;
+	struct qcom_ubwc_cfg_data *cfg = &gpu->_ubwc_config;
+
+	/* Inherit the common config and make some necessary fixups */
+	common_cfg = qcom_ubwc_config_get_data();
+	if (IS_ERR(common_cfg))
+		return PTR_ERR(common_cfg);
+
+	/* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+	*cfg = *common_cfg;
+
+	cfg->ubwc_swizzle = 0x6;
+	cfg->highest_bank_bit = 15;
 
 	if (adreno_is_a610(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 13;
-		gpu->ubwc_config.min_acc_len = 1;
-		gpu->ubwc_config.ubwc_swizzle = 0x7;
+		cfg->highest_bank_bit = 13;
+		cfg->ubwc_swizzle = 0x7;
 	}
 
 	if (adreno_is_a618(gpu))
-		gpu->ubwc_config.highest_bank_bit = 14;
+		cfg->highest_bank_bit = 14;
 
 	if (adreno_is_a619(gpu))
 		/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
-		gpu->ubwc_config.highest_bank_bit = 13;
+		cfg->highest_bank_bit = 13;
 
 	if (adreno_is_a619_holi(gpu))
-		gpu->ubwc_config.highest_bank_bit = 13;
-
-	if (adreno_is_a621(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 13;
-		gpu->ubwc_config.amsbc = 1;
-		gpu->ubwc_config.uavflagprd_inv = 2;
-	}
-
-	if (adreno_is_a623(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 16;
-		gpu->ubwc_config.amsbc = 1;
-		gpu->ubwc_config.rgb565_predicator = 1;
-		gpu->ubwc_config.uavflagprd_inv = 2;
-		gpu->ubwc_config.macrotile_mode = 1;
-	}
+		cfg->highest_bank_bit = 13;
 
-	if (adreno_is_a640_family(gpu))
-		gpu->ubwc_config.amsbc = 1;
+	if (adreno_is_a621(gpu))
+		cfg->highest_bank_bit = 13;
 
-	if (adreno_is_a680(gpu))
-		gpu->ubwc_config.macrotile_mode = 1;
+	if (adreno_is_a623(gpu))
+		cfg->highest_bank_bit = 16;
 
 	if (adreno_is_a650(gpu) ||
 	    adreno_is_a660(gpu) ||
 	    adreno_is_a690(gpu) ||
 	    adreno_is_a730(gpu) ||
 	    adreno_is_a740_family(gpu)) {
-		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
-		gpu->ubwc_config.highest_bank_bit = 16;
-		gpu->ubwc_config.amsbc = 1;
-		gpu->ubwc_config.rgb565_predicator = 1;
-		gpu->ubwc_config.uavflagprd_inv = 2;
-		gpu->ubwc_config.macrotile_mode = 1;
+		/* TODO: get ddr type from bootloader and use 15 for LPDDR4 */
+		cfg->highest_bank_bit = 16;
 	}
 
 	if (adreno_is_a663(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 13;
-		gpu->ubwc_config.amsbc = 1;
-		gpu->ubwc_config.rgb565_predicator = 1;
-		gpu->ubwc_config.uavflagprd_inv = 2;
-		gpu->ubwc_config.macrotile_mode = 1;
-		gpu->ubwc_config.ubwc_swizzle = 0x4;
+		cfg->highest_bank_bit = 13;
+		cfg->ubwc_swizzle = 0x4;
 	}
 
-	if (adreno_is_7c3(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 14;
-		gpu->ubwc_config.amsbc = 1;
-		gpu->ubwc_config.uavflagprd_inv = 2;
-		gpu->ubwc_config.macrotile_mode = 1;
-	}
+	if (adreno_is_7c3(gpu))
+		cfg->highest_bank_bit = 14;
 
-	if (adreno_is_a702(gpu)) {
-		gpu->ubwc_config.highest_bank_bit = 14;
-		gpu->ubwc_config.min_acc_len = 1;
-	}
+	if (adreno_is_a702(gpu))
+		cfg->highest_bank_bit = 14;
+
+	if (cfg->highest_bank_bit != common_cfg->highest_bank_bit)
+		DRM_WARN_ONCE("Inconclusive highest_bank_bit value: %u (GPU) vs %u (UBWC_CFG)\n",
+			      cfg->highest_bank_bit, common_cfg->highest_bank_bit);
+
+	if (cfg->ubwc_swizzle != common_cfg->ubwc_swizzle)
+		DRM_WARN_ONCE("Inconclusive ubwc_swizzle value: %u (GPU) vs %u (UBWC_CFG)\n",
+			      cfg->ubwc_swizzle, common_cfg->ubwc_swizzle);
+
+	gpu->ubwc_config = &gpu->_ubwc_config;
+
+	return 0;
 }
 
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
 	/*
 	 * We subtract 13 from the highest bank bit (13 is the minimum value
 	 * allowed by hw) and write the lowest two bits of the remaining value
 	 * as hbb_lo and the one above it as hbb_hi to the hardware.
 	 */
-	BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
-	u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+	BUG_ON(cfg->highest_bank_bit < 13);
+	u32 hbb = cfg->highest_bank_bit - 13;
+	bool rgb565_predicator = cfg->ubwc_enc_version >= UBWC_4_0;
+	u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+	bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+	bool amsbc = cfg->ubwc_enc_version >= UBWC_3_0;
+	bool min_acc_len_64b = false;
+	u8 uavflagprd_inv = 0;
 	u32 hbb_hi = hbb >> 2;
 	u32 hbb_lo = hbb & 3;
-	u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
-	u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
+
+	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+		uavflagprd_inv = 2;
+
+	if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu))
+		min_acc_len_64b = true;
 
 	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
 		  level2_swizzling_dis << 12 |
-		  adreno_gpu->ubwc_config.rgb565_predicator << 11 |
-		  hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
-		  adreno_gpu->ubwc_config.min_acc_len << 3 |
+		  rgb565_predicator << 11 |
+		  hbb_hi << 10 | amsbc << 4 |
+		  min_acc_len_64b << 3 |
 		  hbb_lo << 1 | ubwc_mode);
 
 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
 		  level2_swizzling_dis << 6 | hbb_hi << 4 |
-		  adreno_gpu->ubwc_config.min_acc_len << 3 |
+		  min_acc_len_64b << 3 |
 		  hbb_lo << 1 | ubwc_mode);
 
 	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
 		  level2_swizzling_dis << 12 | hbb_hi << 10 |
-		  adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
-		  adreno_gpu->ubwc_config.min_acc_len << 3 |
+		  uavflagprd_inv << 4 |
+		  min_acc_len_64b << 3 |
 		  hbb_lo << 1 | ubwc_mode);
 
 	if (adreno_is_a7xx(adreno_gpu))
@@ -703,10 +723,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 			  FIELD_PREP(GENMASK(8, 5), hbb_lo));
 
 	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
-		  adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+		  min_acc_len_64b << 23 | hbb_lo << 21);
 
 	gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
-		  adreno_gpu->ubwc_config.macrotile_mode);
+		  cfg->macrotile_mode);
 }
 
 static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
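[a6xx_set_ubwc_config() now derives amsbc, rgb565_predicator and uavflagprd_inv from the UBWC encoder version and GPU family instead of storing them per-GPU, then packs everything into RB_NC_MODE_CNTL. The bit positions are explicit in the hunk above; here is a standalone check of the packing for a GPU with highest_bank_bit = 16 (hbb = 3). Only the example input values are assumptions.]

#include <stdio.h>

/* Packs REG_A6XX_RB_NC_MODE_CNTL exactly as the hunk above does. */
static unsigned rb_nc_mode_cntl(unsigned hbb, int level2_swizzling_dis,
				int rgb565_predicator, int amsbc,
				int min_acc_len_64b, int ubwc_mode)
{
	unsigned hbb_hi = hbb >> 2;
	unsigned hbb_lo = hbb & 3;

	return level2_swizzling_dis << 12 |
	       rgb565_predicator << 11 |
	       hbb_hi << 10 | amsbc << 4 |
	       min_acc_len_64b << 3 |
	       hbb_lo << 1 | ubwc_mode;
}

int main(void)
{
	/* highest_bank_bit 16 -> hbb 3; UBWC 4.x-style settings assumed. */
	printf("RB_NC_MODE_CNTL = %#x\n", rb_nc_mode_cntl(3, 0, 1, 1, 0, 1));
	return 0;	/* prints 0x817 */
}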
@@ -952,7 +972,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
 
 		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
 		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
-			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
 			drm_gem_object_put(a6xx_gpu->sqe_bo);
 
 			a6xx_gpu->sqe_bo = NULL;
@@ -969,7 +989,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
 		a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
 			sizeof(u32) * gpu->nr_rings,
 			MSM_BO_WC | MSM_BO_MAP_PRIV,
-			gpu->aspace, &a6xx_gpu->shadow_bo,
+			gpu->vm, &a6xx_gpu->shadow_bo,
 			&a6xx_gpu->shadow_iova);
 
 		if (IS_ERR(a6xx_gpu->shadow))
@@ -980,7 +1000,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
 
 	a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
 							 MSM_BO_WC | MSM_BO_MAP_PRIV,
-							 gpu->aspace, &a6xx_gpu->pwrup_reglist_bo,
+							 gpu->vm, &a6xx_gpu->pwrup_reglist_bo,
 							 &a6xx_gpu->pwrup_reglist_iova);
 
 	if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr))
@@ -2193,12 +2213,12 @@ static void a6xx_destroy(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
 	if (a6xx_gpu->sqe_bo) {
-		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm);
 		drm_gem_object_put(a6xx_gpu->sqe_bo);
 	}
 
 	if (a6xx_gpu->shadow_bo) {
-		msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+		msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm);
 		drm_gem_object_put(a6xx_gpu->shadow_bo);
 	}
 
@@ -2238,8 +2258,8 @@ static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
 	mutex_unlock(&a6xx_gpu->gmu.lock);
 }
 
-static struct msm_gem_address_space *
-a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+static struct drm_gpuvm *
+a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -2253,22 +2273,21 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 	    !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
 		quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
 
-	return adreno_iommu_create_address_space(gpu, pdev, quirks);
+	return adreno_iommu_create_vm(gpu, pdev, quirks);
 }
 
-static struct msm_gem_address_space *
-a6xx_create_private_address_space(struct msm_gpu *gpu)
+static struct drm_gpuvm *
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
 {
 	struct msm_mmu *mmu;
 
-	mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+	mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
 
 	if (IS_ERR(mmu))
 		return ERR_CAST(mmu);
 
-	return msm_gem_address_space_create(mmu,
-		"gpu", ADRENO_VM_START,
-		adreno_private_address_space_size(gpu));
+	return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
+				 adreno_private_vm_size(gpu), kernel_managed);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -2385,8 +2404,8 @@ static const struct adreno_gpu_funcs funcs = {
 		.gpu_state_get = a6xx_gpu_state_get,
 		.gpu_state_put = a6xx_gpu_state_put,
 #endif
-		.create_address_space = a6xx_create_address_space,
-		.create_private_address_space = a6xx_create_private_address_space,
+		.create_vm = a6xx_create_vm,
+		.create_private_vm = a6xx_create_private_vm,
 		.get_rptr = a6xx_get_rptr,
 		.progress = a6xx_progress,
 	},
@@ -2414,8 +2433,8 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
 		.gpu_state_get = a6xx_gpu_state_get,
 		.gpu_state_put = a6xx_gpu_state_put,
 #endif
-		.create_address_space = a6xx_create_address_space,
-		.create_private_address_space = a6xx_create_private_address_space,
+		.create_vm = a6xx_create_vm,
+		.create_private_vm = a6xx_create_private_vm,
 		.get_rptr = a6xx_get_rptr,
 		.progress = a6xx_progress,
 	},
@@ -2445,8 +2464,8 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
 		.gpu_state_get = a6xx_gpu_state_get,
 		.gpu_state_put = a6xx_gpu_state_put,
 #endif
-		.create_address_space = a6xx_create_address_space,
-		.create_private_address_space = a6xx_create_private_address_space,
+		.create_vm = a6xx_create_vm,
+		.create_private_vm = a6xx_create_private_vm,
 		.get_rptr = a6xx_get_rptr,
 		.progress = a6xx_progress,
 	},
@@ -2542,11 +2561,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 
 	adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
 
-	if (gpu->aspace)
-		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
-					  a6xx_fault_handler);
+	msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+				  a6xx_fault_handler);
+
+	ret = a6xx_calc_ubwc_config(adreno_gpu);
+	if (ret) {
+		a6xx_destroy(&(a6xx_gpu->base.base));
+		return ERR_PTR(ret);
+	}
 
-	a6xx_calc_ubwc_config(adreno_gpu);
 	/* Set up the preemption specific bits and pieces for each ringbuffer */
 	a6xx_preempt_init(gpu);
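[a6xx_create_private_vm() now threads a kernel_managed flag through to both the cloned pagetable and the VM, so the same path can produce either a kernel-managed VM (legacy submit) or a userspace-managed one (VM_BIND, see the MSM_PARAM_EN_VM_BIND hunk later in this patch). A consolidated sketch of the call shape, not compile-tested; the names are as in the hunk above, and the comment on ADRENO_VM_START restates the rationale given in adreno_gpu.h.]

/* Sketch: mirrors a6xx_create_private_vm() above. */
static struct drm_gpuvm *create_private_vm(struct msm_gpu *gpu,
					   bool kernel_managed)
{
	struct msm_mmu *mmu;

	/* Clone a new per-process pagetable under the GPU's own MMU. */
	mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	/* vm_start sits above 32b to catch missing xyz_BASE_HI programming. */
	return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
				 adreno_private_vm_size(gpu), kernel_managed);
}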
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 9201a53dd341..6e71f617fc3d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -6,6 +6,10 @@
 
 #include "adreno_gpu.h"
 
+#include "a6xx_enums.xml.h"
+#include "a7xx_enums.xml.h"
+#include "a6xx_perfcntrs.xml.h"
+#include "a7xx_perfcntrs.xml.h"
 #include "a6xx.xml.h"
 
 #include "a6xx_gmu.h"
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 341a72a67401..faca2a0243ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -132,7 +132,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu,
 		struct a6xx_crashdumper *dumper)
 {
 	dumper->ptr = msm_gem_kernel_new(gpu->dev,
-		SZ_1M, MSM_BO_WC, gpu->aspace,
+		SZ_1M, MSM_BO_WC, gpu->vm,
 		&dumper->bo, &dumper->iova);
 
 	if (!IS_ERR(dumper->ptr))
@@ -158,7 +158,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
 	/* Make sure all pending memory writes are posted */
 	wmb();
 
-	gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
+	gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
 
 	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
 
@@ -1619,7 +1619,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
 		a7xx_get_clusters(gpu, a6xx_state, dumper);
 		a7xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
 
-		msm_gem_kernel_put(dumper->bo, gpu->aspace);
+		msm_gem_kernel_put(dumper->bo, gpu->vm);
 	}
 
 	a7xx_get_post_crashdumper_registers(gpu, a6xx_state);
@@ -1631,7 +1631,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
 		a6xx_get_clusters(gpu, a6xx_state, dumper);
 		a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
 
-		msm_gem_kernel_put(dumper->bo, gpu->aspace);
+		msm_gem_kernel_put(dumper->bo, gpu->vm);
 	}
 }
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e545106c70be..95d93ac6812a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -212,7 +212,7 @@ static const struct a6xx_shader_block {
 	SHADER(A6XX_SP_LB_5_DATA, 0x200),
 	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
 	SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
-	SHADER(A6XX_SP_UAV_DATA, 0x80),
+	SHADER(A6XX_SP_GFX_UAV_BASE_DATA, 0x80),
 	SHADER(A6XX_SP_INST_TAG, 0x80),
 	SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
 	SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 3b17fd2dba89..6a12a35dabff 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -210,7 +210,7 @@ void a6xx_preempt_hw_init(struct msm_gpu *gpu)
 	gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0);
 
 	/* Enable the GMEM save/restore feature for preemption */
-	gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
+	gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0x1);
 
 	/* Reset the preemption state */
 	set_preempt_state(a6xx_gpu, PREEMPT_NONE);
@@ -344,7 +344,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
 
 	ptr = msm_gem_kernel_new(gpu->dev,
 		PREEMPT_RECORD_SIZE(adreno_gpu),
-		MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+		MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova);
 
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
@@ -362,7 +362,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
 	ptr = msm_gem_kernel_new(gpu->dev,
 		PREEMPT_SMMU_INFO_SIZE,
 		MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
-		gpu->aspace, &bo, &iova);
+		gpu->vm, &bo, &iova);
 
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
@@ -377,7 +377,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
 
 		struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
 
-		msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid);
+		msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid);
 
 		smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
 		smmu_info_ptr->ttbr0 = ttbr;
@@ -405,7 +405,7 @@ void a6xx_preempt_fini(struct msm_gpu *gpu)
 	int i;
 
 	for (i = 0; i < gpu->nr_rings; i++)
-		msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace);
+		msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->vm);
 }
 
 void a6xx_preempt_init(struct msm_gpu *gpu)
@@ -431,7 +431,7 @@ void a6xx_preempt_init(struct msm_gpu *gpu)
 	a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev,
 			PAGE_SIZE,
 			MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
-			gpu->aspace, &a6xx_gpu->preempt_postamble_bo,
+			gpu->vm, &a6xx_gpu->preempt_postamble_bo,
 			&a6xx_gpu->preempt_postamble_iova);
 
 	preempt_prepare_postamble(a6xx_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index f5e1490d07c1..50945bfe9b49 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -16,10 +16,6 @@ bool snapshot_debugbus = false;
 MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
 module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
 
-bool allow_vram_carveout = false;
-MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
-module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
-
 int enable_preemption = -1;
 MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
 module_param(enable_preemption, int, 0600);
@@ -137,9 +133,8 @@ err_disable_rpm:
 	return NULL;
 }
 
-static int find_chipid(struct device *dev, uint32_t *chipid)
+static int find_chipid(struct device_node *node, uint32_t *chipid)
 {
-	struct device_node *node = dev->of_node;
 	const char *compat;
 	int ret;
 
@@ -173,15 +168,36 @@ static int find_chipid(struct device *dev, uint32_t *chipid)
 	/* and if that fails, fall back to legacy "qcom,chipid" property: */
 	ret = of_property_read_u32(node, "qcom,chipid", chipid);
 	if (ret) {
-		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+		DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n",
+			  node, ret);
 		return ret;
 	}
 
-	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+	pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);
 
 	return 0;
 }
 
+bool adreno_has_gpu(struct device_node *node)
+{
+	const struct adreno_info *info;
+	uint32_t chip_id;
+	int ret;
+
+	ret = find_chipid(node, &chip_id);
+	if (ret)
+		return false;
+
+	info = adreno_info(chip_id);
+	if (!info) {
+		pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+			node, ADRENO_CHIPID_ARGS(chip_id));
+		return false;
+	}
+
+	return true;
+}
+
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
@@ -191,19 +207,18 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	struct msm_gpu *gpu;
 	int ret;
 
-	ret = find_chipid(dev, &config.chip_id);
-	if (ret)
+	ret = find_chipid(dev->of_node, &config.chip_id);
+	/* We shouldn't have gotten this far if we can't parse the chip_id */
+	if (WARN_ON(ret))
 		return ret;
 
 	dev->platform_data = &config;
 	priv->gpu_pdev = to_platform_device(dev);
 
 	info = adreno_info(config.chip_id);
-	if (!info) {
-		dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
-			ADRENO_CHIPID_ARGS(config.chip_id));
+	/* We shouldn't have gotten this far if we don't recognize the GPU: */
+	if (WARN_ON(!info))
 		return -ENXIO;
-	}
 
 	config.info = info;
 
@@ -245,42 +260,23 @@ static const struct component_ops a3xx_ops = {
 	.unbind = adreno_unbind,
 };
 
-static void adreno_device_register_headless(void)
-{
-	/* on imx5, we don't have a top-level mdp/dpu node
-	 * this creates a dummy node for the driver for that case
-	 */
-	struct platform_device_info dummy_info = {
-		.parent = NULL,
-		.name = "msm",
-		.id = -1,
-		.res = NULL,
-		.num_res = 0,
-		.data = NULL,
-		.size_data = 0,
-		.dma_mask = ~0,
-	};
-	platform_device_register_full(&dummy_info);
-}
-
 static int adreno_probe(struct platform_device *pdev)
 {
+	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon") ||
+	    msm_gpu_no_components())
+		return msm_gpu_probe(pdev, &a3xx_ops);
 
-	int ret;
-
-	ret = component_add(&pdev->dev, &a3xx_ops);
-	if (ret)
-		return ret;
-
-	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
-		adreno_device_register_headless();
-
-	return 0;
+	return component_add(&pdev->dev, &a3xx_ops);
 }
 
 static void adreno_remove(struct platform_device *pdev)
 {
-	component_del(&pdev->dev, &a3xx_ops);
+	struct msm_drm_private *priv = platform_get_drvdata(pdev);
+
+	if (priv->kms_init)
+		component_del(&pdev->dev, &a3xx_ops);
+	else
+		msm_gpu_remove(pdev, &a3xx_ops);
 }
 
 static void adreno_shutdown(struct platform_device *pdev)
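[adreno_has_gpu() lets the probe path reject unknown hardware before binding, reusing the same find_chipid()/adreno_info() pair as adreno_bind(). The chip id it validates is a packed u32; a standalone decode of the new catalog entry's id 0x43030c00 follows. The one-byte-per-field, most-significant-first layout behind ADRENO_CHIPID_FMT is an assumption here, not something these hunks show.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chipid = 0x43030c00;	/* new a7xx catalog entry above */

	/* Assumed layout: one byte per field, most significant first. */
	printf("chip id %u.%u.%u.%u\n",
	       (chipid >> 24) & 0xff, (chipid >> 16) & 0xff,
	       (chipid >> 8) & 0xff, chipid & 0xff);
	return 0;	/* prints "chip id 67.3.12.0" under this assumption */
}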
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 9a327d543f27..e02cabb39f19 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1311,8 +1311,8 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
 		REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
 	{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
 		REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
-	{ "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR,
-		REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100},
+	{ "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+		REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
 	{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
 		REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
 	{ "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2348ffb35f7e..f1230465bf0d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,25 +191,27 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
 	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
 }
 
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
-			    struct platform_device *pdev)
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+		 struct platform_device *pdev)
 {
-	return adreno_iommu_create_address_space(gpu, pdev, 0);
+	return adreno_iommu_create_vm(gpu, pdev, 0);
 }
 
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
-				  struct platform_device *pdev,
-				  unsigned long quirks)
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+		       struct platform_device *pdev,
+		       unsigned long quirks)
 {
 	struct iommu_domain_geometry *geometry;
 	struct msm_mmu *mmu;
-	struct msm_gem_address_space *aspace;
+	struct drm_gpuvm *vm;
 	u64 start, size;
 
 	mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
-	if (IS_ERR_OR_NULL(mmu))
+	if (!mmu)
+		return ERR_PTR(-ENODEV);
+	else if (IS_ERR_OR_NULL(mmu))
 		return ERR_CAST(mmu);
 
 	geometry = msm_iommu_get_geometry(mmu);
@@ -224,16 +226,16 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
 	start = max_t(u64, SZ_16M, geometry->aperture_start);
 	size = geometry->aperture_end - start + 1;
 
-	aspace = msm_gem_address_space_create(mmu, "gpu",
-		start & GENMASK_ULL(48, 0), size);
+	vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
+			       size, true);
 
-	if (IS_ERR(aspace) && !IS_ERR(mmu))
+	if (IS_ERR(vm) && !IS_ERR(mmu))
 		mmu->funcs->destroy(mmu);
 
-	return aspace;
+	return vm;
 }
 
-u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+u64 adreno_private_vm_size(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
@@ -259,25 +261,59 @@ u64 adreno_private_address_space_size(struct msm_gpu *gpu)
 	return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
 }
 
+void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	unsigned long flags;
+
+	/*
+	 * Wait until the cooldown period has passed and we would actually
+	 * collect a crashdump to re-enable stall-on-fault.
+	 */
+	spin_lock_irqsave(&priv->fault_stall_lock, flags);
+	if (!priv->stall_enabled &&
+	    ktime_after(ktime_get(), priv->stall_reenable_time) &&
+	    !READ_ONCE(gpu->crashstate)) {
+		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
+		priv->stall_enabled = true;
+
+		mmu->funcs->set_stall(mmu, true);
+	}
+	spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
+}
+
 #define ARM_SMMU_FSR_TF		BIT(1)
 #define ARM_SMMU_FSR_PF		BIT(3)
 #define ARM_SMMU_FSR_EF		BIT(4)
+#define ARM_SMMU_FSR_SS		BIT(30)
 
 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 			 struct adreno_smmu_fault_info *info, const char *block,
 			 u32 scratch[4])
 {
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
 	const char *type = "UNKNOWN";
-	bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+	bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
+			      !READ_ONCE(gpu->crashstate);
+	unsigned long irq_flags;
 
 	/*
-	 * If we aren't going to be resuming later from fault_worker, then do
-	 * it now.
+	 * In case there is a subsequent storm of pagefaults, disable
+	 * stall-on-fault for at least half a second.
 	 */
-	if (!do_devcoredump) {
-		gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+	if (priv->stall_enabled) {
+		priv->stall_enabled = false;
+
+		mmu->funcs->set_stall(mmu, false);
 	}
+	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
+	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
 
 	/*
 	 * Print a default message if we couldn't get the data from the
 	 * adreno-smmu-priv
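[The crashdump path no longer resumes translation from the fault handler. Instead, stall-on-fault is switched off on the first fault and only re-armed by adreno_check_and_reenable_stall() at the next submit, once a 500 ms cooldown has passed and no crash state is pending. A standalone model of that state machine with a fake millisecond clock; the spinlock is omitted, and the field names follow the hunks above.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static struct {
	bool stall_enabled;
	uint64_t stall_reenable_time;	/* ms, fake clock */
} priv = { .stall_enabled = true };

static bool crashstate;

/* Fault side: disable stalls and start the 500 ms cooldown. */
static void on_fault(uint64_t now_ms)
{
	if (priv.stall_enabled)
		priv.stall_enabled = false;	/* mmu->funcs->set_stall(mmu, false) */
	priv.stall_reenable_time = now_ms + 500;
}

/* Submit side: re-arm only after the cooldown, and never while crashed. */
static void check_and_reenable_stall(uint64_t now_ms)
{
	if (!priv.stall_enabled && now_ms > priv.stall_reenable_time &&
	    !crashstate)
		priv.stall_enabled = true;	/* mmu->funcs->set_stall(mmu, true) */
}

int main(void)
{
	on_fault(1000);
	check_and_reenable_stall(1200);	/* still cooling down */
	printf("at 1200ms: %d\n", priv.stall_enabled);	/* 0 */
	check_and_reenable_stall(1600);
	printf("at 1600ms: %d\n", priv.stall_enabled);	/* 1 */
	return 0;
}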
@@ -304,26 +340,37 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 			scratch[0], scratch[1], scratch[2], scratch[3]);
 
 	if (do_devcoredump) {
+		struct msm_gpu_fault_info fault_info = {};
+
 		/* Turn off the hangcheck timer to keep it from bothering us */
 		timer_delete(&gpu->hangcheck_timer);
 
-		gpu->fault_info.ttbr0 = info->ttbr0;
-		gpu->fault_info.iova = iova;
-		gpu->fault_info.flags = flags;
-		gpu->fault_info.type = type;
-		gpu->fault_info.block = block;
+		fault_info.ttbr0 = info->ttbr0;
+		fault_info.iova = iova;
+		fault_info.flags = flags;
+		fault_info.type = type;
+		fault_info.block = block;
 
-		kthread_queue_work(gpu->worker, &gpu->fault_work);
+		msm_gpu_fault_crashstate_capture(gpu, &fault_info);
 	}
 
 	return 0;
 }
 
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+static bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+	return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		     uint32_t param, uint64_t *value, uint32_t *len)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct drm_device *drm = gpu->dev;
+	/* Note ctx can be NULL when called from rd_open(): */
+	struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
 
 	/* No pointer params yet */
 	if (*len != 0)
@@ -369,8 +416,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
 		*value = 0;
 		return 0;
 	case MSM_PARAM_FAULTS:
-		if (ctx->aspace)
-			*value = gpu->global_faults + ctx->aspace->faults;
+		if (vm)
+			*value = gpu->global_faults + to_msm_vm(vm)->faults;
 		else
 			*value = gpu->global_faults;
 		return 0;
@@ -378,36 +425,39 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
 		*value = gpu->suspend_count;
 		return 0;
 	case MSM_PARAM_VA_START:
-		if (ctx->aspace == gpu->aspace)
+		if (vm == gpu->vm)
 			return UERR(EINVAL, drm, "requires per-process pgtables");
-		*value = ctx->aspace->va_start;
+		*value = vm->mm_start;
 		return 0;
 	case MSM_PARAM_VA_SIZE:
-		if (ctx->aspace == gpu->aspace)
+		if (vm == gpu->vm)
 			return UERR(EINVAL, drm, "requires per-process pgtables");
-		*value = ctx->aspace->va_size;
+		*value = vm->mm_range;
 		return 0;
 	case MSM_PARAM_HIGHEST_BANK_BIT:
-		*value = adreno_gpu->ubwc_config.highest_bank_bit;
+		*value = adreno_gpu->ubwc_config->highest_bank_bit;
 		return 0;
 	case MSM_PARAM_RAYTRACING:
 		*value = adreno_gpu->has_ray_tracing;
 		return 0;
 	case MSM_PARAM_UBWC_SWIZZLE:
-		*value = adreno_gpu->ubwc_config.ubwc_swizzle;
+		*value = adreno_gpu->ubwc_config->ubwc_swizzle;
 		return 0;
 	case MSM_PARAM_MACROTILE_MODE:
-		*value = adreno_gpu->ubwc_config.macrotile_mode;
+		*value = adreno_gpu->ubwc_config->macrotile_mode;
 		return 0;
 	case MSM_PARAM_UCHE_TRAP_BASE:
 		*value = adreno_gpu->uche_trap_base;
 		return 0;
+	case MSM_PARAM_HAS_PRR:
+		*value = adreno_smmu_has_prr(gpu);
+		return 0;
 	default:
 		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
 	}
 }
 
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		     uint32_t param, uint64_t value, uint32_t len)
 {
 	struct drm_device *drm = gpu->dev;
@@ -453,7 +503,22 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
 	case MSM_PARAM_SYSPROF:
 		if (!capable(CAP_SYS_ADMIN))
 			return UERR(EPERM, drm, "invalid permissions");
-		return msm_file_private_set_sysprof(ctx, gpu, value);
+		return msm_context_set_sysprof(ctx, gpu, value);
+	case MSM_PARAM_EN_VM_BIND:
+		/* We can only support VM_BIND with per-process pgtables: */
+		if (ctx->vm == gpu->vm)
+			return UERR(EINVAL, drm, "requires per-process pgtables");
+
+		/*
+		 * We can only switch to VM_BIND mode if the VM has not yet
+		 * been created:
+		 */
+		if (ctx->vm)
+			return UERR(EBUSY, drm, "VM already created");
+
+		ctx->userspace_managed_vm = value;
+
+		return 0;
 	default:
 		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
 	}
@@ -575,7 +640,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
 	void *ptr;
 
 	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
-		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
 
 	if (IS_ERR(ptr))
 		return ERR_CAST(ptr);
@@ -768,6 +833,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
 	for (i = 0; state->bos && i < state->nr_bos; i++)
 		kvfree(state->bos[i].data);
 
+	kfree(state->vm_logs);
 	kfree(state->bos);
 	kfree(state->comm);
 	kfree(state->cmd);
@@ -908,6 +974,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
 			 info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
 	}
 
+	if (state->vm_logs) {
+		drm_puts(p, "vm-log:\n");
+		for (i = 0; i < state->nr_vm_logs; i++) {
+			struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
+			drm_printf(p, " - %s:%d: 0x%016llx-0x%016llx\n",
+				   e->op, e->queue_id, e->iova,
+				   e->iova + e->range);
+		}
+	}
+
 	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
 
 	drm_puts(p, "ringbuffer:\n");
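[MSM_PARAM_EN_VM_BIND has strict ordering requirements: it needs per-process pagetables and must be set before the context's VM is first instantiated. A standalone model of the two guards in the set_param hunk above, with UERR() reduced to plain negative errnos and the struct names simplified.]

#include <errno.h>
#include <stdio.h>

struct vm { int dummy; };

static struct vm gpu_vm;	/* stands in for gpu->vm, the shared VM */

struct context {
	struct vm *vm;			/* NULL until first use */
	int userspace_managed_vm;
};

static int set_en_vm_bind(struct context *ctx, int value)
{
	if (ctx->vm == &gpu_vm)		/* shared VM: no per-process tables */
		return -EINVAL;
	if (ctx->vm)			/* too late, VM already created */
		return -EBUSY;
	ctx->userspace_managed_vm = value;
	return 0;
}

int main(void)
{
	struct context a = { .vm = NULL };
	printf("fresh ctx: %d\n", set_en_vm_bind(&a, 1));	/* 0 */
	a.vm = &gpu_vm;
	printf("shared vm: %d\n", set_en_vm_bind(&a, 1));	/* -EINVAL */
	return 0;
}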
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a8f4bf416e64..9dc93c247196 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -12,13 +12,14 @@
 #include <linux/firmware.h>
 #include <linux/iopoll.h>
 
+#include <linux/soc/qcom/ubwc.h>
+
 #include "msm_gpu.h"
 
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
 extern bool snapshot_debugbus;
-extern bool allow_vram_carveout;
 
 enum {
 	ADRENO_FW_PM4 = 0,
@@ -205,44 +206,12 @@ struct adreno_gpu {
 	/* firmware: */
 	const struct firmware *fw[ADRENO_FW_MAX];
 
-	struct {
-		/**
-		 * @rgb565_predicator: Unknown, introduced with A650 family,
-		 * related to UBWC mode/ver 4
-		 */
-		u32 rgb565_predicator;
-		/** @uavflagprd_inv: Unknown, introduced with A650 family */
-		u32 uavflagprd_inv;
-		/** @min_acc_len: Whether the minimum access length is 64 bits */
-		u32 min_acc_len;
-		/**
-		 * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
-		 *
-		 * UBWC 1.0 always enables all three levels.
-		 * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
-		 * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
-		 *
-		 * This is a bitmask where BIT(0) enables level 1, BIT(1)
-		 * controls level 2, and BIT(2) enables level 3.
-		 */
-		u32 ubwc_swizzle;
-		/**
-		 * @highest_bank_bit: Highest Bank Bit
-		 *
-		 * The Highest Bank Bit value represents the bit of the highest
-		 * DDR bank. This should ideally use DRAM type detection.
-		 */
-		u32 highest_bank_bit;
-		u32 amsbc;
-		/**
-		 * @macrotile_mode: Macrotile Mode
-		 *
-		 * Whether to use 4-channel macrotiling mode or the newer
-		 * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
-		 * 4-channel and 1 is 8-channel.
-		 */
-		u32 macrotile_mode;
-	} ubwc_config;
+	/*
+	 * The migration to the central UBWC config db is still in flight - keep
+	 * a copy containing some local fixups until that's done.
+	 */
+	const struct qcom_ubwc_cfg_data *ubwc_config;
+	struct qcom_ubwc_cfg_data _ubwc_config;
 
 	/*
 	 * Register offsets are different between some GPUs.
@@ -580,10 +549,10 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
 /* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
 #define ADRENO_VM_START 0x100000000ULL
 
-u64 adreno_private_address_space_size(struct msm_gpu *gpu);
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+u64 adreno_private_vm_size(struct msm_gpu *gpu);
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		     uint32_t param, uint64_t *value, uint32_t *len);
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		     uint32_t param, uint64_t value, uint32_t len);
 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
 					 const char *fwname);
@@ -623,19 +592,21 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len,
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
-			    struct platform_device *pdev);
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+		 struct platform_device *pdev);
 
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
-				  struct platform_device *pdev,
-				  unsigned long quirks);
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+		       struct platform_device *pdev,
+		       unsigned long quirks);
 
 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 			 struct adreno_smmu_fault_info *info, const char *block,
 			 u32 scratch[4]);
 
+void adreno_check_and_reenable_stall(struct adreno_gpu *gpu);
+
 int adreno_read_speedbin(struct device *dev, u32 *speedbin);
 
 /*