Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a4xx_gpu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 89
1 file changed, 44 insertions, 45 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7cb8d9849c07..db06c06067ae 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+			if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -145,7 +145,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
 	/* Early A430's have a timing issue with SP/TP power collapse;
 	 * disabling HW clock gating prevents it. */
-	if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+	if (adreno_is_a430(adreno_gpu) && adreno_patchid(adreno_gpu) < 2)
 		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
 	else
 		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
@@ -251,8 +251,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
 
 	/* Disable L2 bypass to avoid UCHE out of bounds errors */
-	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
-	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
 
 	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
 			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
@@ -606,48 +606,35 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
 
 static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
-		REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
 
 	return 0;
 }
 
+static u64 a4xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+	u64 busy_cycles;
+
+	busy_cycles = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_RBBM_1_LO);
+	*out_sample_rate = clk_get_rate(gpu->core_clk);
+
+	return busy_cycles;
+}
+
 static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
 	ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
 	return ring->memptrs->rptr;
 }
 
-static const struct adreno_gpu_funcs funcs = {
-	.base = {
-		.get_param = adreno_get_param,
-		.set_param = adreno_set_param,
-		.hw_init = a4xx_hw_init,
-		.pm_suspend = a4xx_pm_suspend,
-		.pm_resume = a4xx_pm_resume,
-		.recover = a4xx_recover,
-		.submit = a4xx_submit,
-		.active_ring = adreno_active_ring,
-		.irq = a4xx_irq,
-		.destroy = a4xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-		.show = adreno_show,
-#endif
-		.gpu_state_get = a4xx_gpu_state_get,
-		.gpu_state_put = adreno_gpu_state_put,
-		.create_address_space = adreno_iommu_create_address_space,
-		.get_rptr = a4xx_get_rptr,
-	},
-	.get_timestamp = a4xx_get_timestamp,
-};
-
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 {
 	struct a4xx_gpu *a4xx_gpu = NULL;
 	struct adreno_gpu *adreno_gpu;
 	struct msm_gpu *gpu;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = priv->gpu_pdev;
+	struct adreno_platform_config *config = pdev->dev.platform_data;
 	struct icc_path *ocmem_icc_path;
 	struct icc_path *icc_path;
 	int ret;
@@ -670,7 +657,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	gpu->perfcntrs = NULL;
 	gpu->num_perfcntrs = 0;
 
-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
 	if (ret)
 		goto fail;
 
@@ -683,20 +670,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	if (ret)
 		goto fail;
 
-	if (!gpu->aspace) {
-		/* TODO we think it is possible to configure the GPU to
-		 * restrict access to VRAM carveout. But the required
-		 * registers are unknown. For now just bail out and
-		 * limp along with just modesetting. If it turns out
-		 * to not be possible to restrict access, then we must
-		 * implement a cmdstream validator.
-		 */
-		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-		if (!allow_vram_carveout) {
-			ret = -ENXIO;
-			goto fail;
-		}
-	}
+	adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
 
 	icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
 	if (IS_ERR(icc_path)) {
@@ -729,3 +703,28 @@ fail:
 
 	return ERR_PTR(ret);
 }
+
+const struct adreno_gpu_funcs a4xx_gpu_funcs = {
+	.base = {
+		.get_param = adreno_get_param,
+		.set_param = adreno_set_param,
+		.hw_init = a4xx_hw_init,
+		.pm_suspend = a4xx_pm_suspend,
+		.pm_resume = a4xx_pm_resume,
+		.recover = a4xx_recover,
+		.submit = a4xx_submit,
+		.active_ring = adreno_active_ring,
+		.irq = a4xx_irq,
+		.destroy = a4xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+		.show = adreno_show,
+#endif
+		.gpu_busy = a4xx_gpu_busy,
+		.gpu_state_get = a4xx_gpu_state_get,
+		.gpu_state_put = adreno_gpu_state_put,
+		.create_vm = adreno_create_vm,
+		.get_rptr = a4xx_get_rptr,
+	},
+	.init = a4xx_gpu_init,
+	.get_timestamp = a4xx_get_timestamp,
+};
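
Side note on the UCHE trap base hunk above: the two hard-coded 0xffff0000 writes are replaced by splitting a single 64-bit adreno_gpu->uche_trap_base value (set to 0xffff0000ffff0000 in a4xx_gpu_init) with the kernel's lower_32_bits()/upper_32_bits() helpers. The following is a minimal stand-alone sketch, not part of the patch, written as ordinary userspace C with the masks spelled out to mirror what those helpers compute; it just confirms both halves still come out to the old 0xffff0000 constants:

/* Illustration only; the masks below mirror lower_32_bits()/upper_32_bits(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0xffff0000ffff0000ull;        /* adreno_gpu->uche_trap_base */
	uint32_t lo = (uint32_t)(base & 0xffffffffu); /* value written to REG_A4XX_UCHE_TRAP_BASE_LO */
	uint32_t hi = (uint32_t)(base >> 32);         /* value written to REG_A4XX_UCHE_TRAP_BASE_HI */

	/* Prints lo=0xffff0000 hi=0xffff0000, matching the previously hard-coded writes. */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}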
