Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a5xx_gpu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 29 +++++++++++------------------
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 3dcec7acb384..660ba0db8900 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -605,11 +605,9 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
}
- gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
- REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
- gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
- REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
return 0;
}
@@ -868,8 +866,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* memory rendering at this point in time and we don't want to block off
* part of the virtual memory space.
*/
- gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
- REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Put the GPU into 64 bit by default */
@@ -908,8 +905,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
return ret;
/* Set the ringbuffer address */
- gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
- gpu->rb[0]->iova);
+ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
/*
* If the microcode supports the WHERE_AM_I opcode then we can use that
@@ -936,7 +932,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
}
gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
- REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+ shadowptr(a5xx_gpu, gpu->rb[0]));
} else if (gpu->nr_rings > 1) {
/* Disable preemption if WHERE_AM_I isn't available */
a5xx_preempt_fini(gpu);
@@ -1239,9 +1235,9 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
- gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
- gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
/* Turn off the hangcheck timer to keep it from bothering us */
@@ -1427,8 +1423,7 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
- REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
return 0;
}
@@ -1465,8 +1460,7 @@ static int a5xx_crashdumper_run(struct msm_gpu *gpu,
if (IS_ERR_OR_NULL(dumper->ptr))
return -EINVAL;
- gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
- REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
@@ -1666,8 +1660,7 @@ static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
u64 busy_cycles;
- busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
- REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
*out_sample_rate = clk_get_rate(gpu->core_clk);
return busy_cycles;
@@ -1705,7 +1698,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
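
Apart from the final .create_address_space hunk, every change above follows the same pattern: the 64-bit register accessors now take only the _LO register offset, and the explicit _HI register argument is gone. A minimal sketch of what such single-offset helpers could look like is given below; the real definitions live in drivers/gpu/drm/msm/msm_gpu.h and are not part of this diff, so the bodies here are illustrative and assume the _HI dword sits immediately after the _LO dword:

/*
 * Illustrative only -- sketch of single-offset 64-bit accessors built on
 * the existing 32-bit gpu_write()/gpu_read() helpers from msm_gpu.h.
 * Assumes reg + 1 addresses the _HI half of the register pair.
 */
static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
	/* write the low dword, then the high dword at the next offset */
	gpu_write(gpu, reg, lower_32_bits(val));
	gpu_write(gpu, reg + 1, upper_32_bits(val));
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
{
	/* assemble the 64-bit value from the low and high dwords */
	return ((u64)gpu_read(gpu, reg)) |
	       ((u64)gpu_read(gpu, reg + 1) << 32);
}

With helpers shaped like this, a caller only needs the base offset, e.g. gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova), which is exactly the form the new lines in this diff take.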