Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a5xx_gpu.c')
-rw-r--r--	drivers/gpu/drm/msm/adreno/a5xx_gpu.c	172
1 file changed, 96 insertions, 76 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e5916c106796..56eaff2ee4e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
 	struct msm_ringbuffer *ring = submit->ring;
 	struct drm_gem_object *obj;
 	uint32_t *ptr, dwords;
 	unsigned int i;
@@ -75,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+			if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 		}
 	}
 
+	a5xx_gpu->last_seqno[ring->id] = submit->seqno;
 	a5xx_flush(gpu, ring, true);
 	a5xx_preempt_trigger(gpu);
 
@@ -128,8 +131,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
-		gpu->cur_ctx_seqno = 0;
+		ring->cur_ctx_seqno = 0;
 		a5xx_submit_in_rb(gpu, submit);
 		return;
 	}
@@ -150,9 +155,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
 	OUT_RING(ring, 1);
 
-	/* Enable local preemption for finegrain preemption */
+	/*
+	 * Disable local preemption by default because it requires
+	 * user-space to be aware of it and provide additional handling
+	 * to restore rendering state or do various flushes on switch.
+	 */
 	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
-	OUT_RING(ring, 0x1);
+	OUT_RING(ring, 0x0);
 
 	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
 	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -164,7 +173,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+			if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -206,6 +215,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	/* Write the fence to the scratch register */
 	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
 	OUT_RING(ring, submit->seqno);
+	a5xx_gpu->last_seqno[ring->id] = submit->seqno;
 
 	/*
 	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
@@ -439,7 +449,8 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 	const struct adreno_five_hwcg_regs *regs;
 	unsigned int i, sz;
 
-	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
+	if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+	    adreno_is_a508(adreno_gpu)) {
 		regs = a50x_hwcg;
 		sz = ARRAY_SIZE(a50x_hwcg);
 	} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
@@ -483,7 +494,8 @@ static int a5xx_me_init(struct msm_gpu *gpu)
 	OUT_RING(ring, 0x00000000);
 
 	/* Specify workarounds for various microcode issues */
-	if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+	if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+	    adreno_is_a530(adreno_gpu)) {
 		/* Workaround for token end syncs
 		 * Force a WFI after every direct-render 3D mode draw and every
 		 * 2D mode 3 draw
@@ -610,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu)
 		a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
 			sizeof(u32) * gpu->nr_rings,
 			MSM_BO_WC | MSM_BO_MAP_PRIV,
-			gpu->aspace, &a5xx_gpu->shadow_bo,
+			gpu->vm, &a5xx_gpu->shadow_bo,
 			&a5xx_gpu->shadow_iova);
 
 		if (IS_ERR(a5xx_gpu->shadow))
@@ -684,7 +696,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-	u32 regbit;
+	u32 hbb;
 	int ret;
 
 	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@@ -740,10 +752,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
 
 	/* Disable L2 bypass in the UCHE */
-	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
-	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
-	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
-	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
 
 	/* Set the GMEM VA range (0 to gpu->gmem) */
 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
@@ -752,10 +764,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 		0x00100000 + adreno_gpu->info->gmem - 1);
 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
 
-	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
-	    adreno_is_a510(adreno_gpu)) {
+	if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+	    adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
 		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
-		if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
+		if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+		    adreno_is_a508(adreno_gpu))
 			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
 		else
 			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
@@ -771,7 +784,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
 	}
 
-	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
+	if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+	    adreno_is_a508(adreno_gpu))
 		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
 			  (0x100 << 11 | 0x100 << 22));
 	else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
@@ -789,8 +803,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	 * Disable the RB sampler datapath DP2 clock gating optimization
 	 * for 1-SP GPUs, as it is enabled by default.
 	 */
-	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
-	    adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
+	if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+	    adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+	    adreno_is_a512(adreno_gpu))
 		gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
 
 	/* Disable UCHE global filter as SP can invalidate/flush independently */
@@ -820,18 +835,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
-	/* Set the highest bank bit */
-	if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
-		regbit = 2;
-	else
-		regbit = 1;
+	BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13);
+	hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13;
 
-	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
-	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
+	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
 
 	if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
 	    adreno_is_a540(adreno_gpu))
-		gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+		gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, hbb);
 
 	/* Disable All flat shading optimization (ALLFLATOPTDIS) */
 	gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
@@ -1030,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 	a5xx_preempt_fini(gpu);
 
 	if (a5xx_gpu->pm4_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->pm4_bo);
 	}
 
 	if (a5xx_gpu->pfp_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->pfp_bo);
 	}
 
 	if (a5xx_gpu->gpmu_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->gpmu_bo);
 	}
 
 	if (a5xx_gpu->shadow_bo) {
-		msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
 		drm_gem_object_put(a5xx_gpu->shadow_bo);
 	}
 
@@ -1243,7 +1255,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
 		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
 
 	/* Turn off the hangcheck timer to keep it from bothering us */
-	del_timer(&gpu->hangcheck_timer);
+	timer_delete(&gpu->hangcheck_timer);
 
 	kthread_queue_work(gpu->worker, &gpu->recover_work);
 }
@@ -1348,7 +1360,7 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
-	/* Adreno 506, 508, 509, 510, 512 needs manual RBBM sus/res control */
+	/* Adreno 505, 506, 508, 509, 510, 512 needs manual RBBM sus/res control */
 	if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
 		/* Halt the sp_input_clk at HM level */
 		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
@@ -1391,9 +1403,9 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 	u32 mask = 0xf;
 	int i, ret;
 
-	/* A506, A508, A510 have 3 XIN ports in VBIF */
-	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
-	    adreno_is_a510(adreno_gpu))
+	/* A505, A506, A508, A510 have 3 XIN ports in VBIF */
+	if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
+	    adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
 		mask = 0x7;
 
 	/* Clear the VBIF pipe before shutting down */
@@ -1445,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
 		struct a5xx_crashdumper *dumper)
 {
 	dumper->ptr = msm_gem_kernel_new(gpu->dev,
-		SZ_1M, MSM_BO_WC, gpu->aspace,
+		SZ_1M, MSM_BO_WC, gpu->vm,
 		&dumper->bo, &dumper->iova);
 
 	if (!IS_ERR(dumper->ptr))
@@ -1545,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 
 	if (a5xx_crashdumper_run(gpu, &dumper)) {
 		kfree(a5xx_state->hlsqregs);
-		msm_gem_kernel_put(dumper.bo, gpu->aspace);
+		msm_gem_kernel_put(dumper.bo, gpu->vm);
 		return;
 	}
 
@@ -1553,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 	memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
 		count * sizeof(u32));
 
-	msm_gem_kernel_put(dumper.bo, gpu->aspace);
+	msm_gem_kernel_put(dumper.bo, gpu->vm);
 }
 
 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1679,34 +1691,6 @@ static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 	return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
 }
 
-static const struct adreno_gpu_funcs funcs = {
-	.base = {
-		.get_param = adreno_get_param,
-		.set_param = adreno_set_param,
-		.hw_init = a5xx_hw_init,
-		.ucode_load = a5xx_ucode_load,
-		.pm_suspend = a5xx_pm_suspend,
-		.pm_resume = a5xx_pm_resume,
-		.recover = a5xx_recover,
-		.submit = a5xx_submit,
-		.active_ring = a5xx_active_ring,
-		.irq = a5xx_irq,
-		.destroy = a5xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-		.show = a5xx_show,
-#endif
-#if defined(CONFIG_DEBUG_FS)
-		.debugfs_init = a5xx_debugfs_init,
-#endif
-		.gpu_busy = a5xx_gpu_busy,
-		.gpu_state_get = a5xx_gpu_state_get,
-		.gpu_state_put = a5xx_gpu_state_put,
-		.create_address_space = adreno_create_address_space,
-		.get_rptr = a5xx_get_rptr,
-	},
-	.get_timestamp = a5xx_get_timestamp,
-};
-
 static void check_speed_bin(struct device *dev)
 {
 	struct nvmem_cell *cell;
@@ -1739,22 +1723,18 @@ static void check_speed_bin(struct device *dev)
 	devm_pm_opp_set_supported_hw(dev, &val, 1);
 }
 
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
 	struct adreno_platform_config *config = pdev->dev.platform_data;
+	const struct qcom_ubwc_cfg_data *common_cfg;
 	struct a5xx_gpu *a5xx_gpu = NULL;
 	struct adreno_gpu *adreno_gpu;
 	struct msm_gpu *gpu;
 	unsigned int nr_rings;
 	int ret;
 
-	if (!pdev) {
-		DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
-		return ERR_PTR(-ENXIO);
-	}
-
 	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
 	if (!a5xx_gpu)
 		return ERR_PTR(-ENOMEM);
@@ -1773,17 +1753,57 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 	if (config->info->revn == 510)
 		nr_rings = 1;
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
 	if (ret) {
 		a5xx_destroy(&(a5xx_gpu->base.base));
 		return ERR_PTR(ret);
 	}
 
-	if (gpu->aspace)
-		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+	msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+				  a5xx_fault_handler);
 
 	/* Set up the preemption specific bits and pieces for each ringbuffer */
 	a5xx_preempt_init(gpu);
 
+	/* Inherit the common config and make some necessary fixups */
+	common_cfg = qcom_ubwc_config_get_data();
+	if (IS_ERR(common_cfg))
+		return ERR_CAST(common_cfg);
+
+	/* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+	adreno_gpu->_ubwc_config = *common_cfg;
+	adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;
+
+	adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
+
 	return gpu;
 }
+
+const struct adreno_gpu_funcs a5xx_gpu_funcs = {
+	.base = {
+		.get_param = adreno_get_param,
+		.set_param = adreno_set_param,
+		.hw_init = a5xx_hw_init,
+		.ucode_load = a5xx_ucode_load,
+		.pm_suspend = a5xx_pm_suspend,
+		.pm_resume = a5xx_pm_resume,
+		.recover = a5xx_recover,
+		.submit = a5xx_submit,
+		.active_ring = a5xx_active_ring,
+		.irq = a5xx_irq,
+		.destroy = a5xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+		.show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
+		.debugfs_init = a5xx_debugfs_init,
+#endif
+		.gpu_busy = a5xx_gpu_busy,
+		.gpu_state_get = a5xx_gpu_state_get,
+		.gpu_state_put = a5xx_gpu_state_put,
+		.create_vm = adreno_create_vm,
+		.get_rptr = a5xx_get_rptr,
+	},
+	.init = a5xx_gpu_init,
+	.get_timestamp = a5xx_get_timestamp,
+};
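
A note on the highest-bank-bit hunk above: the A5xx TPL1/RB registers encode the UBWC highest bank bit (HBB) as an offset from bit 13, which is why the patch asserts highest_bank_bit >= 13 and programs highest_bank_bit - 13. The old hardcoded values fall out as HBB 15 on A530/A540 and HBB 14 on the rest, which is what the shared UBWC config is expected to report for those parts. A minimal standalone sketch of the encoding follows; the helper name and the sample HBB value are illustrative, not from the patch:

#include <assert.h>
#include <stdint.h>

/*
 * Illustrative only: A5xx registers encode the UBWC highest bank bit
 * (HBB) relative to bit 13, so HBB 13 -> 0, 14 -> 1, 15 -> 2.
 */
static uint32_t a5xx_hbb_field(uint32_t highest_bank_bit)
{
	assert(highest_bank_bit >= 13);	/* mirrors the BUG_ON() in the patch */
	return highest_bank_bit - 13;
}

int main(void)
{
	uint32_t hbb = a5xx_hbb_field(15);	/* e.g. the A530/A540 case */

	/* Field placement mirrors the patch: bit 7 in TPL1, bit 1 in RB */
	uint32_t tpl1_mode_cntl = hbb << 7;
	uint32_t rb_mode_cntl = hbb << 1;

	/* HBB 15 reproduces the old hardcoded regbit = 2 */
	return (tpl1_mode_cntl == (2u << 7) && rb_mode_cntl == (2u << 1)) ? 0 : 1;
}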

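Similarly, the gpu->cur_ctx_seqno to ring->cur_ctx_seqno change is easy to skim past: once preemption can interleave several ringbuffers, "which context submitted last" becomes a per-ring question, so the CTX_RESTORE_BUF skip has to be tracked per ring. A simplified sketch of that skip logic, using hypothetical stand-in structs rather than the driver's real msm_ringbuffer/msm_gem_submit types:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver structs, for illustration only. */
struct ring { uint32_t cur_ctx_seqno; };
struct ctx  { uint32_t seqno; };

/*
 * A CTX_RESTORE_BUF IB only needs to run when the submitting context
 * differs from the one that last ran on *this* ring; a single GPU-wide
 * seqno would wrongly skip the restore after a ring switch.
 */
static bool needs_ctx_restore(struct ring *r, const struct ctx *c)
{
	if (r->cur_ctx_seqno == c->seqno)
		return false;	/* same context as the last submit on this ring */
	r->cur_ctx_seqno = c->seqno;
	return true;
}

int main(void)
{
	struct ring r0 = { 0 };
	struct ctx a = { .seqno = 1 }, b = { .seqno = 2 };

	/* First submit from A restores, a repeat does not, B restores again. */
	return (needs_ctx_restore(&r0, &a) &&
		!needs_ctx_restore(&r0, &a) &&
		needs_ctx_restore(&r0, &b)) ? 0 : 1;
}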