Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a5xx_gpu.c')
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 650e5bac225f..4a04dc43a8e6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
@@ -620,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu)
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
- gpu->aspace, &a5xx_gpu->shadow_bo,
+ gpu->vm, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
if (IS_ERR(a5xx_gpu->shadow))
@@ -833,8 +835,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
- BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
- hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13);
+ hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13;
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
@@ -1040,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
if (a5xx_gpu->shadow_bo) {
- msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a5xx_gpu->shadow_bo);
}
@@ -1455,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
- SZ_1M, MSM_BO_WC, gpu->aspace,
+ SZ_1M, MSM_BO_WC, gpu->vm,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
@@ -1555,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
return;
}
@@ -1563,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ msm_gem_kernel_put(dumper.bo, gpu->vm);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1711,7 +1713,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_create_address_space,
+ .create_vm = adreno_create_vm,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
@@ -1754,6 +1756,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct qcom_ubwc_cfg_data *common_cfg;
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
@@ -1784,21 +1787,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (gpu->aspace)
- msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
- /* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
- adreno_gpu->ubwc_config.highest_bank_bit = 15;
- else
- adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* Inherit the common config and make some necessary fixups */
+ common_cfg = qcom_ubwc_config_get_data();
+ if (IS_ERR(common_cfg))
+ return ERR_CAST(common_cfg);
- /* a5xx only supports UBWC 1.0, these are not configurable */
- adreno_gpu->ubwc_config.macrotile_mode = 0;
- adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ /* Copy the data into the internal struct to drop the const qualifier (temporarily) */
+ adreno_gpu->_ubwc_config = *common_cfg;
+ adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;
adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
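
Note on the final hunk: instead of hard-coding per-GPU UBWC parameters, the driver now inherits the SoC-wide table via qcom_ubwc_config_get_data() and copies it into a mutable, driver-owned struct before publishing it through the const ubwc_config pointer that the rest of the driver (e.g. the highest_bank_bit check in a5xx_hw_init above) reads. The following is a minimal sketch of that pattern, not the driver's actual code: the helper name is hypothetical, the header path <linux/soc/qcom/ubwc.h> is assumed, and only the members visible in the hunks above are relied on.

/*
 * Sketch only: fetch the common UBWC config, copy it to drop the
 * const qualifier, then expose it through the pointer the driver
 * dereferences elsewhere (ubwc_config->highest_bank_bit, ...).
 */
#include <linux/err.h>
#include <linux/soc/qcom/ubwc.h>

#include "adreno_gpu.h"

static int a5xx_inherit_ubwc_config(struct adreno_gpu *adreno_gpu)
{
	const struct qcom_ubwc_cfg_data *common_cfg;

	/* SoC-wide UBWC settings; may be an ERR_PTR on failure. */
	common_cfg = qcom_ubwc_config_get_data();
	if (IS_ERR(common_cfg))
		return PTR_ERR(common_cfg);

	/* Copy into the internal struct so per-GPU fixups can be
	 * applied before the const pointer is published. */
	adreno_gpu->_ubwc_config = *common_cfg;
	adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config;

	return 0;
}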