Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 1273
1 file changed, 1009 insertions(+), 264 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index b349692219b7..5903cd891b49 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,11 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
 #include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
 #include <drm/drm_gem.h>
 
 #include "a6xx_gpu.h"
@@ -24,7 +28,7 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
 	gmu->hung = true;
 
 	/* Turn off the hangcheck timer while we are resetting */
-	del_timer(&gpu->hangcheck_timer);
+	timer_delete(&gpu->hangcheck_timer);
 
 	/* Queue the GPU handler because we need to treat this as a recovery */
 	kthread_queue_work(gpu->worker, &gpu->recover_work);
@@ -89,25 +93,39 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
 /* Check to see if the GX rail is still powered */
 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	u32 val;
 
 	/* This can be called from gpu state code so make sure GMU is valid */
 	if (!gmu->initialized)
 		return false;
 
+	/* If GMU is absent, then GX power domain is ON as long as GPU is in active state */
+	if (adreno_has_gmu_wrapper(adreno_gpu))
+		return true;
+
 	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
 
+	if (adreno_is_a7xx(adreno_gpu))
+		return !(val &
+			(A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+			A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+
 	return !(val &
 		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
 		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
 }
 
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+		       bool suspended)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	const struct a6xx_info *info = adreno_gpu->info->a6xx;
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 	u32 perf_index;
+	u32 bw_index = 0;
 	unsigned long gpu_freq;
 	int ret = 0;
 
@@ -120,6 +138,37 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 		if (gpu_freq == gmu->gpu_freqs[perf_index])
 			break;
 
+	/* If enabled, find the corresponding DDR bandwidth index */
+	if (info->bcms && gmu->nr_gpu_bws > 1) {
+		unsigned int bw = dev_pm_opp_get_bw(opp, true, 0);
+
+		for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
+			if (bw == gmu->gpu_bw_table[bw_index])
+				break;
+		}
+
+		/* Vote AB as a fraction of the max bandwidth, starting from A750 */
+		if (bw && adreno_is_a750_family(adreno_gpu)) {
+			u64 tmp;
+
+			/* For now, vote for 25% of the bandwidth */
+			tmp = bw * 25;
+			do_div(tmp, 100);
+
+			/*
+			 * The AB vote consists of a 16 bit wide quantized level
+			 * against the maximum supported bandwidth.
+			 * Quantization can be calculated as below:
+			 * vote = (bandwidth * 2^16) / max bandwidth
+			 */
+			tmp *= MAX_AB_VOTE;
+			do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]);
+
+			bw_index |= AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE));
+			bw_index |= AB_VOTE_ENABLE;
+		}
+	}
+
 	gmu->current_perf_index = perf_index;
 	gmu->freq = gmu->gpu_freqs[perf_index];
 
@@ -127,15 +176,18 @@
 
 	/*
 	 * This can get called from devfreq while the hardware is idle. Don't
-	 * bring up the power if it isn't already active
+	 * bring up the power if it isn't already active. All we're doing here
+	 * is updating the frequency so that when we come back online we're at
+	 * the right rate.
 	 */
-	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+	if (suspended)
 		return;
 
 	if (!gmu->legacy) {
-		a6xx_hfi_set_freq(gmu, perf_index);
-		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
-		pm_runtime_put(gmu->dev);
+		a6xx_hfi_set_freq(gmu, perf_index, bw_index);
+		/* With Bandwidth voting, we now vote for all resources, so skip OPP set */
+		if (!bw_index)
+			dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
 		return;
 	}
 
@@ -159,7 +211,6 @@
 		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
 
 	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
-	pm_runtime_put(gmu->dev);
 }
 
 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
@@ -173,14 +224,19 @@
 
 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
 {
-	u32 val;
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int local = gmu->idle_level;
+	u32 val;
 
 	/* SPTP and IFPC both report as IFPC */
 	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
 		local = GMU_IDLE_STATE_IFPC;
 
-	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+	if (adreno_is_a8xx(adreno_gpu))
+		val = gmu_read(gmu, REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+	else
+		val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
 
 	if (val == local) {
 		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
@@ -199,9 +255,10 @@ int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
 
 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	u32 mask, reset_val, val;
 	int ret;
-	u32 val;
-	u32 mask, reset_val;
 
 	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
 	if (val <= 0x20010004) {
@@ -217,7 +274,13 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 	/* Set the log wptr index
 	 * note: downstream saves the value in poweroff and restores it here
 	 */
-	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A8XX_GMU_GENERAL_9, 0);
+	else if (adreno_is_a7xx(adreno_gpu))
+		gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
+	else
+		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
 
 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
 
@@ -227,6 +290,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 	if (ret)
 		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
 
+	set_bit(GMU_STATUS_FW_START, &gmu->status);
+
 	return ret;
 }
 
@@ -292,10 +357,18 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
 /* Trigger a OOB (out of band) request to the GMU */
 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int ret;
 	u32 val;
 	int request, ack;
 
+	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+	/* Skip OOB calls since RGMU is not enabled */
+	if (adreno_has_rgmu(adreno_gpu))
+		return 0;
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return -EINVAL;
 
@@ -316,9 +389,23 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 	/* Trigger the equested OOB operation */
 	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
 
-	/* Wait for the acknowledge interrupt */
-	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
-		val & (1 << ack), 100, 10000);
+	do {
+		/* Wait for the acknowledge interrupt */
+		ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+			val & (1 << ack), 100, 10000);
+
+		if (!ret)
+			break;
+
+		if (completion_done(&a6xx_gpu->base.fault_coredump_done))
+			break;
+
+		/* We may timeout because the GMU is temporarily wedged from
+		 * pending faults from the GPU and we are taking a devcoredump.
+		 * Wait until the MMU is resumed and try again.
+		 */
+		wait_for_completion(&a6xx_gpu->base.fault_coredump_done);
+	} while (true);
 
 	if (ret)
 		DRM_DEV_ERROR(gmu->dev,
@@ -335,8 +422,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 /* Clear a pending OOB state in the GMU */
 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int bit;
 
+	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+	/* Skip OOB calls since RGMU is not enabled */
+	if (adreno_has_rgmu(adreno_gpu))
+		return;
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return;
 
@@ -349,12 +444,15 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 }
 
 /* Enable CPU control of SPTP power power collapse */
-static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 {
 	int ret;
 	u32 val;
 
-	if (!gmu->legacy)
+	WARN_ON(!gmu->legacy);
+
+	/* Nothing to do if GMU does the power management */
+	if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE)
 		return 0;
 
 	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
@@ -371,7 +469,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 }
 
 /* Disable CPU control of SPTP power power collapse */
-static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
 {
 	u32 val;
 	int ret;
@@ -410,13 +508,35 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
 	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
 }
 
+static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
+{
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+	/*
+	 * GEMNoC can power collapse whilst the GPU is being powered down, resulting
+	 * in the power down sequence not being fully executed. That in turn can
+	 * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
+	 */
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
+	else if (adreno_is_a7xx(adreno_gpu) || (adreno_is_a621(adreno_gpu) ||
+		 adreno_is_7c3(adreno_gpu)))
+		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
+}
+
 /* Let the GMU know that we are about to go into slumber */
 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int ret;
 
 	/* Disable the power counter so the GMU isn't busy */
-	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+	else
+		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
 
 	/* Disable SPTP_PC if the CPU is responsible for it */
 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
@@ -445,6 +565,7 @@
 out:
 	/* Put fence into allow mode */
 	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+	a6xx_gemnoc_workaround(gmu);
 	return ret;
 }
 
@@ -453,9 +574,10 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 	int ret;
 	u32 val;
 
-	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
-	/* Wait for the register to finish posting */
-	wmb();
+	if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
+		return 0;
+
+	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
 
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
 		val & (1 << 1), 100, 10000);
@@ -474,62 +596,73 @@
 
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
-	/* Set up CX GMU counter 0 to count busy ticks */
-	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
-	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
-
-	/* Enable the power counter */
-	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
 	return 0;
 }
 
 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	u32 bitmask = BIT(16);
 	int ret;
 	u32 val;
 
+	if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
+		return;
+
+	if (adreno_is_a840(adreno_gpu))
+		bitmask = BIT(30);
+
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
 
 	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
-		val, val & (1 << 16), 100, 10000);
+		val, val & bitmask, 100, 10000);
 
 	if (ret)
 		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
 
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+	set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
 }
 
 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
 {
-	return msm_writel(value, ptr + (offset << 2));
+	writel(value, ptr + (offset << 2));
}
 
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
-		const char *name);
-
 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct platform_device *pdev = to_platform_device(gmu->dev);
-	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
-	void __iomem *seqptr;
+	u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
+	void __iomem *seqptr = NULL;
 	uint32_t pdc_address_offset;
+	void __iomem *pdcptr;
 	bool pdc_in_aop = false;
 
-	if (!pdcptr)
-		goto err;
+	/* On A8x and above, RPMH/PDC configurations are entirely configured in AOP */
+	if (adreno_is_a8xx(adreno_gpu))
+		return;
 
-	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+	pdcptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc");
+	if (IS_ERR(pdcptr))
+		return;
+
+	if (adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu))
 		pdc_in_aop = true;
-	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
 		pdc_address_offset = 0x30090;
+	else if (adreno_is_a619(adreno_gpu))
+		pdc_address_offset = 0x300a0;
 	else
 		pdc_address_offset = 0x30080;
 
 	if (!pdc_in_aop) {
-		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
-		if (!seqptr)
-			goto err;
+		seqptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc_seq");
+		if (IS_ERR(seqptr))
+			return;
 	}
 
 	/* Disable SDE clock gating */
@@ -541,20 +674,26 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
-	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4,
+		       adreno_is_a740_family(adreno_gpu) ? 0x80000021 : 0x80000000);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 
+	/* The second spin of A7xx GPUs messed with some register offsets.. */
+	if (adreno_is_a740_family(adreno_gpu))
+		seqmem0_drv0_reg = REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740;
+
 	/* Load RSC sequencer uCode for sleep and wakeup */
-	if (adreno_is_a650_family(adreno_gpu)) {
-		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
-		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
-		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
-		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
-		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+	if (adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu)) {
+		gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0);
+		gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab);
+		gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581);
+		gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2);
+		gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad);
 	} else {
 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
@@ -597,7 +736,8 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
 
-	if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
+	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
+	    adreno_is_a650_family(adreno_gpu))
 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
 	else
 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
@@ -612,12 +752,6 @@ setup_pdc:
 
 	/* ensure no writes happen before the uCode is fully written */
 	wmb();
-
-err:
-	if (!IS_ERR_OR_NULL(pdcptr))
-		iounmap(pdcptr);
-	if (!IS_ERR_OR_NULL(seqptr))
-		iounmap(seqptr);
 }
 
 /*
@@ -631,11 +765,18 @@ err:
 /* Set up the idle state for the GMU */
 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
 	/* Disable GMU WB/RB buffer */
 	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
 	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
 	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
 
+	/* A7xx knows better by default! */
+	if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
+		return;
+
 	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
 
 	switch (gmu->idle_level) {
@@ -672,12 +813,6 @@ struct block_header {
 	u32 data[];
 };
 
-/* this should be a general kernel helper */
-static int in_range(u32 addr, u32 start, u32 size)
-{
-	return addr >= start && addr < start + size;
-}
-
 static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
 {
 	if (!in_range(blk->addr, bo->iova, bo->size))
@@ -687,6 +822,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
 	return true;
 }
 
+#define NEXT_BLK(blk) \
+	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -694,11 +832,14 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
 	const struct block_header *blk;
 	u32 reg_offset;
+	u32 ver;
 
 	u32 itcm_base = 0x00000000;
 	u32 dtcm_base = 0x00040000;
 
-	if (adreno_is_a650_family(adreno_gpu))
+	if (adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu) ||
+	    adreno_is_a8xx(adreno_gpu))
 		dtcm_base = 0x10004000;
 
 	if (gmu->legacy) {
@@ -717,7 +858,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 
 	for (blk = (const struct block_header *) fw_image->data;
 	     (const u8*) blk < fw_image->data + fw_image->size;
-	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+	     blk = NEXT_BLK(blk)) {
 		if (blk->size == 0)
 			continue;
 
@@ -740,49 +881,56 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 		}
 	}
 
+	ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
+	DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n",
+		      FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
+		      FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
+		      FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
+
 	return 0;
 }
 
 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 {
-	static bool rpmh_init;
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx;
+	const struct adreno_reglist *gbif_cx = a6xx_info->gbif_cx;
+	u32 fence_range_lower, fence_range_upper;
+	u32 chipid = 0;
 	int ret;
-	u32 chipid;
 
-	if (adreno_is_a650_family(adreno_gpu)) {
+	/* Vote veto for FAL10 */
+	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+	} else if (adreno_is_a8xx(adreno_gpu)) {
+		gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
+		gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
 	}
 
-	if (state == GMU_WARM_BOOT) {
-		ret = a6xx_rpmh_start(gmu);
-		if (ret)
-			return ret;
-	} else {
+	/* Turn on TCM (Tightly Coupled Memory) retention */
+	if (adreno_is_a7xx(adreno_gpu))
+		a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
+	else if (!adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+	ret = a6xx_rpmh_start(gmu);
+	if (ret)
+		return ret;
+
+	if (state == GMU_COLD_BOOT) {
 		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
 			"GMU firmware is not loaded\n"))
 			return -ENOENT;
 
-		/* Turn on register retention */
-		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
-
-		/* We only need to load the RPMh microcode once */
-		if (!rpmh_init) {
-			a6xx_gmu_rpmh_init(gmu);
-			rpmh_init = true;
-		} else {
-			ret = a6xx_rpmh_start(gmu);
-			if (ret)
-				return ret;
-		}
-
 		ret = a6xx_gmu_fw_load(gmu);
 		if (ret)
 			return ret;
 	}
 
+	/* Clear init result to make sure we are getting a fresh value */
 	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
 	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
 
@@ -790,18 +938,68 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
 
+	if (adreno_is_a8xx(adreno_gpu)) {
+		fence_range_upper = 0x32;
+		fence_range_lower = 0x8c0;
+	} else if (adreno_is_a7xx(adreno_gpu)) {
+		fence_range_upper = 0x32;
+		fence_range_lower = 0x8a0;
+	} else {
+		fence_range_upper = 0xa;
+		fence_range_lower = 0xa0;
+	}
+
 	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
-		(1 << 31) | (0xa << 18) | (0xa0));
+		  BIT(31) |
+		  FIELD_PREP(GENMASK(30, 18), fence_range_upper) |
+		  FIELD_PREP(GENMASK(17, 0), fence_range_lower));
 
-	chipid = adreno_gpu->rev.core << 24;
-	chipid |= adreno_gpu->rev.major << 16;
-	chipid |= adreno_gpu->rev.minor << 12;
-	chipid |= adreno_gpu->rev.patchid << 8;
+	/*
+	 * Snapshots toggle the NMI bit which will result in a jump to the NMI
+	 * handler instead of __main. Set the M3 config value to avoid that.
+	 */
+	gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
 
-	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+	if (a6xx_info->gmu_chipid) {
+		chipid = a6xx_info->gmu_chipid;
+	} else {
+		/*
+		 * Note that the GMU has a slightly different layout for
+		 * chip_id, for whatever reason, so a bit of massaging
+		 * is needed.  The upper 16b are the same, but minor and
+		 * patchid are packed in four bits each with the lower
+		 * 8b unused:
+		 */
+		chipid = adreno_gpu->chip_id & 0xffff0000;
+		chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
+		chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
+	}
 
-	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
-		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+	if (adreno_is_a8xx(adreno_gpu)) {
+		gmu_write(gmu, REG_A8XX_GMU_GENERAL_10, chipid);
+		gmu_write(gmu, REG_A8XX_GMU_GENERAL_8,
+			  (gmu->log.iova & GENMASK(31, 12)) |
+			  ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+	} else if (adreno_is_a7xx(adreno_gpu)) {
+		gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
+		gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
+			  (gmu->log.iova & GENMASK(31, 12)) |
+			  ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+	} else {
+		gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
+		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+			  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+	}
+
+	/* For A7x and newer, do the CX GBIF configurations before GMU wake up */
+	for (int i = 0; (gbif_cx && gbif_cx[i].offset); i++)
+		gpu_write(gpu, gbif_cx[i].offset, gbif_cx[i].value);
+
+	if (adreno_is_a8xx(adreno_gpu)) {
+		gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000);
+		gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33);
+	}
 
 	/* Set up the lowest idle level on the GMU */
 	a6xx_gmu_power_config(gmu);
@@ -814,10 +1012,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 		ret = a6xx_gmu_gfx_rail_on(gmu);
 		if (ret)
 			return ret;
-	}
 
-	/* Enable SPTP_PC if the CPU is responsible for it */
-	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
 		ret = a6xx_sptprac_enable(gmu);
 		if (ret)
 			return ret;
@@ -852,22 +1047,57 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
 
 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 {
-	u32 val;
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	u32 val, seqmem_off = 0;
+
+	/* The second spin of A7xx GPUs messed with some register offsets.. */
+	if (adreno_is_a740_family(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
+		seqmem_off = 4;
 
 	/* Make sure there are no outstanding RPMh votes */
-	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
-		(val & 1), 100, 10000);
-	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
-		(val & 1), 100, 10000);
-	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
-		(val & 1), 100, 10000);
-	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
-		(val & 1), 100, 1000);
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 1000);
+
+	if (!adreno_is_a740_family(adreno_gpu) && !adreno_is_a8xx(adreno_gpu))
+		return;
+
+	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS5_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS6_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS7_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 1000);
+	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS8_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 10000);
+	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS9_DRV0_STATUS + seqmem_off,
+			      val, (val & 1), 100, 1000);
 }
 
 /* Force the GMU off in case it isn't responsive */
 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	/*
+	 * Turn off keep alive that might have been enabled by the hang
+	 * interrupt
+	 */
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+	else
+		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
 	/* Flush all the queues */
 	a6xx_hfi_stop(gmu);
 
@@ -877,8 +1107,27 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 	/* Force off SPTP in case the GMU is managing it */
 	a6xx_sptprac_disable(gmu);
 
+	a6xx_gemnoc_workaround(gmu);
+
 	/* Make sure there are no outstanding RPMh votes */
 	a6xx_gmu_rpmh_off(gmu);
+
+	/* Clear the WRITEDROPPED fields and put fence into allow mode */
+	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
+	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+	/* Make sure the above writes go through */
+	wmb();
+
+	/* Halt the gmu cm3 core */
+	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+	adreno_gpu->funcs->bus_halt(adreno_gpu, true);
+
+	/* Reset GPU core blocks */
+	a6xx_gpu_sw_reset(gpu, true);
+
+	a6xx_rpmh_stop(gmu);
 }
 
 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -887,11 +1136,11 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
 
 	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
-	if (IS_ERR_OR_NULL(gpu_opp))
+	if (IS_ERR(gpu_opp))
 		return;
 
 	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
-	a6xx_gmu_set_freq(gpu, gpu_opp);
+	a6xx_gmu_set_freq(gpu, gpu_opp, false);
 	dev_pm_opp_put(gpu_opp);
 }
 
@@ -901,7 +1150,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
 
 	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
-	if (IS_ERR_OR_NULL(gpu_opp))
+	if (IS_ERR(gpu_opp))
 		return;
 
 	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
@@ -916,7 +1165,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	int status, ret;
 
 	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
-		return 0;
+		return -EINVAL;
 
 	gmu->hung = false;
 
@@ -933,6 +1182,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 
 	/* Use a known rate to bring up the GMU */
 	clk_set_rate(gmu->core_clk, 200000000);
+	clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ?
+		     200000000 : 150000000);
 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
 	if (ret) {
 		pm_runtime_put(gmu->gxpd);
@@ -940,6 +1191,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 		return ret;
 	}
 
+	/* Read the slice info on A8x GPUs */
+	a8xx_gpu_get_slice_info(gpu);
+
 	/* Set the bus quota to a reasonable value for boot */
 	a6xx_gmu_set_initial_bw(gpu, gmu);
 
@@ -949,15 +1203,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	enable_irq(gmu->gmu_irq);
 
 	/* Check to see if we are doing a cold or warm boot */
-	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
-		GMU_WARM_BOOT : GMU_COLD_BOOT;
-
-	/*
-	 * Warm boot path does not work on newer GPUs
-	 * Presumably this is because icache/dcache regions must be restored
-	 */
-	if (!gmu->legacy)
+	if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
+		status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
+				GMU_WARM_BOOT : GMU_COLD_BOOT;
+	} else if (gmu->legacy) {
+		status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+				GMU_WARM_BOOT : GMU_COLD_BOOT;
+	} else {
+		/*
+		 * Warm boot path does not work on newer A6xx GPUs
+		 * Presumably this is because icache/dcache regions must be restored
+		 */
 		status = GMU_COLD_BOOT;
+	}
 
 	ret = a6xx_gmu_fw_start(gmu, status);
 	if (ret)
@@ -978,6 +1236,11 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	/* Set the GPU to the current freq */
 	a6xx_gmu_set_initial_freq(gpu, gmu);
 
+	if (refcount_read(&gpu->sysprof_active) > 1) {
+		ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+		if (!ret)
+			set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status);
+	}
+
 out:
 	/* On failure, shut down the GMU to leave it in a good state */
 	if (ret) {
@@ -1005,81 +1268,59 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
-#define GBIF_CLIENT_HALT_MASK	BIT(0)
-#define GBIF_ARB_HALT_MASK	BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if (!a6xx_has_gbif(adreno_gpu)) {
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-								0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/* The GBIF halt needs to be explicitly cleared */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	u32 val;
+	int ret;
 
 	/*
-	 * The GMU may still be in slumber unless the GPU started so check and
-	 * skip putting it back into slumber if so
+	 * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when
+	 * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when
+	 * required
 	 */
-	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+	if (adreno_gpu->base.needs_hw_init) {
+		if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
+			goto force_off;
 
-	if (val != 0xf) {
-		int ret = a6xx_gmu_wait_for_idle(gmu);
+		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+	}
 
-		/* If the GMU isn't responding assume it is hung */
-		if (ret) {
-			a6xx_gmu_force_off(gmu);
-			return;
-		}
+	if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+		a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
 
-		a6xx_bus_clear_pending_transactions(adreno_gpu);
+	ret = a6xx_gmu_wait_for_idle(gmu);
 
-		/* tell the GMU we want to slumber */
-		a6xx_gmu_notify_slumber(gmu);
+	/* If the GMU isn't responding assume it is hung */
+	if (ret)
+		goto force_off;
 
-		ret = gmu_poll_timeout(gmu,
-			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
-			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
-			100, 10000);
+	adreno_gpu->funcs->bus_halt(adreno_gpu, a6xx_gpu->hung);
 
-		/*
-		 * Let the user know we failed to slumber but don't worry too
-		 * much because we are powering down anyway
-		 */
+	/* tell the GMU we want to slumber */
+	ret = a6xx_gmu_notify_slumber(gmu);
+	if (ret)
+		goto force_off;
 
-		if (ret)
-			DRM_DEV_ERROR(gmu->dev,
-				"Unable to slumber GMU: status = 0%x/0%x\n",
-				gmu_read(gmu,
-					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
-				gmu_read(gmu,
-					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
-	}
+	ret = gmu_poll_timeout(gmu,
+		REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+		!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+		100, 10000);
+
+	/*
+	 * Let the user know we failed to slumber but don't worry too
+	 * much because we are powering down anyway
+	 */
+
+	if (ret)
+		DRM_DEV_ERROR(gmu->dev,
+			"Unable to slumber GMU: status = 0%x/0%x\n",
+			gmu_read(gmu,
+				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+			gmu_read(gmu,
+				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
 
 	/* Turn off HFI */
 	a6xx_hfi_stop(gmu);
@@ -1089,6 +1330,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 
 	/* Tell RPMh to power off the GPU */
 	a6xx_rpmh_stop(gmu);
+
+	return;
+
+force_off:
+	a6xx_gmu_force_off(gmu);
 }
 
@@ -1129,19 +1375,21 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
 {
-	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
-	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
-	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
-	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
-	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
-	msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
-
-	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
-	msm_gem_address_space_put(gmu->aspace);
+	struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
+	msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
+	msm_gem_kernel_put(gmu->log.obj, gmu->vm);
+
+	mmu->funcs->detach(mmu);
+	drm_gpuvm_put(gmu->vm);
 }
 
 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
-		size_t size, u64 iova)
+		size_t size, u64 iova, const char *name)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct drm_device *dev = a6xx_gpu->base.base.dev;
@@ -1166,8 +1414,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
 	if (IS_ERR(bo->obj))
 		return PTR_ERR(bo->obj);
 
-	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
-		range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
+		range_start, range_end);
 	if (ret) {
 		drm_gem_object_put(bo->obj);
 		return ret;
@@ -1176,23 +1424,119 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
 	bo->virt = msm_gem_get_vaddr(bo->obj);
 	bo->size = size;
 
+	msm_gem_object_set_name(bo->obj, "%s", name);
+
 	return 0;
 }
 
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
 {
-	struct iommu_domain *domain;
 	struct msm_mmu *mmu;
 
-	domain = iommu_domain_alloc(&platform_bus_type);
-	if (!domain)
-		return -ENODEV;
+	mmu = msm_iommu_new(gmu->dev, 0);
+	if (IS_ERR(mmu))
+		return PTR_ERR(mmu);
+
+	gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
+	if (IS_ERR(gmu->vm))
+		return PTR_ERR(gmu->vm);
+
+	return 0;
+}
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+	__le32 unit;
+	__le16 width;
+	u8 vcd;
+	u8 reserved;
+};
+
+static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
+				       const struct a6xx_info *info,
+				       struct a6xx_gmu *gmu)
+{
+	const struct bcm_db *bcm_data[GMU_MAX_BCMS] = { 0 };
+	unsigned int bcm_index, bw_index, bcm_count = 0;
+
+	/* Retrieve BCM data from cmd-db */
+	for (bcm_index = 0; bcm_index < GMU_MAX_BCMS; bcm_index++) {
+		const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+		size_t count;
 
-	mmu = msm_iommu_new(gmu->dev, domain);
-	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
-	if (IS_ERR(gmu->aspace)) {
-		iommu_domain_free(domain);
-		return PTR_ERR(gmu->aspace);
+		/* Stop at NULL terminated bcm entry */
+		if (!bcm->name)
+			break;
+
+		bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count);
+		if (IS_ERR(bcm_data[bcm_index]))
+			return PTR_ERR(bcm_data[bcm_index]);
+
+		if (!count) {
+			dev_err(gmu->dev, "invalid BCM '%s' aux data size\n",
+				bcm->name);
+			return -EINVAL;
+		}
+
+		bcm_count++;
+	}
+
+	/* Generate BCM votes values for each bandwidth & BCM */
+	for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) {
+		u32 *data = gmu->gpu_ib_votes[bw_index];
+		u32 bw = gmu->gpu_bw_table[bw_index];
+
+		/* Calculations loosely copied from bcm_aggregate() & tcs_cmd_gen() */
+		for (bcm_index = 0; bcm_index < bcm_count; bcm_index++) {
+			const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+			bool commit = false;
+			u64 peak;
+			u32 vote;
+
+			if (bcm_index == bcm_count - 1 ||
+			    (bcm_data[bcm_index + 1] &&
+			     bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd))
+				commit = true;
+
+			if (!bw) {
+				data[bcm_index] = BCM_TCS_CMD(commit, false, 0, 0);
+				continue;
+			}
+
+			if (bcm->fixed) {
+				u32 perfmode = 0;
+
+				/* GMU on A6xx votes perfmode on all valid bandwidth */
+				if (!adreno_is_a7xx(adreno_gpu) ||
+				    (bcm->perfmode_bw && bw >= bcm->perfmode_bw))
+					perfmode = bcm->perfmode;
+
+				data[bcm_index] = BCM_TCS_CMD(commit, true, 0, perfmode);
+				continue;
+			}
+
+			/* Multiply the bandwidth by the width of the connection */
+			peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width);
+			do_div(peak, bcm->buswidth);
+
+			/* Input bandwidth value is in KBps, scale the value to BCM unit */
+			peak *= 1000;
+			do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit));
+
+			vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
+
+			/* GMUs on A7xx votes on both x & y */
+			if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
+				data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
+			else
+				data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
+		}
 	}
 
 	return 0;
@@ -1220,13 +1564,14 @@
 }
 
 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
-		unsigned long *freqs, int freqs_count, const char *id)
+		unsigned long *freqs, int freqs_count,
+		const char *pri_id, const char *sec_id)
 {
 	int i, j;
 	const u16 *pri, *sec;
 	size_t pri_count, sec_count;
 
-	pri = cmd_db_read_aux_data(id, &pri_count);
+	pri = cmd_db_read_aux_data(pri_id, &pri_count);
 	if (IS_ERR(pri))
 		return PTR_ERR(pri);
 	/*
@@ -1237,7 +1582,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 	if (!pri_count)
 		return -EINVAL;
 
-	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+	sec = cmd_db_read_aux_data(sec_id, &sec_count);
 	if (IS_ERR(sec))
 		return PTR_ERR(sec);
 
@@ -1291,26 +1636,96 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 	return 0;
 }
 
+static int a6xx_gmu_rpmh_dep_votes_init(struct device *dev, u32 *votes,
+					unsigned long *freqs, int freqs_count)
+{
+	const u16 *mx;
+	size_t count;
+
+	mx = cmd_db_read_aux_data("mx.lvl", &count);
+	if (IS_ERR(mx))
+		return PTR_ERR(mx);
+	/*
+	 * The data comes back as an array of unsigned shorts so adjust the
+	 * count accordingly
+	 */
+	count >>= 1;
+	if (!count)
+		return -EINVAL;
+
+	/* Fix the vote for zero frequency */
+	votes[0] = 0xffffffff;
+
+	/* Construct a vote for rest of the corners */
+	for (int i = 1; i < freqs_count; i++) {
+		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+		u8 j, index = 0;
+
+		/* Get the primary index that matches the arc level */
+		for (j = 0; j < count; j++) {
+			if (mx[j] >= level) {
+				index = j;
+				break;
+			}
+		}
+
+		if (j == count) {
+			DRM_DEV_ERROR(dev,
+				"Mx Level %u not found in the RPMh list\n",
+				level);
+			DRM_DEV_ERROR(dev, "Available levels:\n");
+			for (j = 0; j < count; j++)
+				DRM_DEV_ERROR(dev, "  %u\n", mx[j]);
+
+			return -EINVAL;
+		}
+
+		/* Construct the vote */
+		votes[i] = (0x3fff << 14) | (index << 8) | (0xff);
+	}
+
+	return 0;
+}
+
 /*
  * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
  * to construct the list of votes on the CPU and send it over. Query the RPMh
  * voltage levels and build the votes
+ * The GMU can also vote for DDR interconnects, use the OPP bandwidth entries
+ * and BCM parameters to build the votes.
 */
 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	const struct a6xx_info *info = adreno_gpu->info->a6xx;
 	struct msm_gpu *gpu = &adreno_gpu->base;
+	const char *sec_id;
+	const u16 *gmxc;
 	int ret;
 
+	gmxc = cmd_db_read_aux_data("gmxc.lvl", NULL);
+	if (gmxc == ERR_PTR(-EPROBE_DEFER))
+		return -EPROBE_DEFER;
+
+	/* If GMxC is present, prefer that as secondary rail for GX votes */
+	sec_id = IS_ERR_OR_NULL(gmxc) ? "mx.lvl" : "gmxc.lvl";
+
 	/* Build the GX votes */
 	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
-		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
+		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl", sec_id);
 
 	/* Build the CX votes */
 	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
-		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl", "mx.lvl");
+
+	ret |= a6xx_gmu_rpmh_dep_votes_init(gmu->dev, gmu->dep_arc_votes,
+		gmu->gpu_freqs, gmu->nr_gpu_freqs);
+
+	/* Build the interconnect votes */
+	if (info->bcms && gmu->nr_gpu_bws > 1)
+		ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu);
 
 	return ret;
 }
@@ -1347,10 +1762,43 @@ static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
 	return index;
 }
 
+static int a6xx_gmu_build_bw_table(struct device *dev, unsigned long *bandwidths,
+		u32 size)
+{
+	int count = dev_pm_opp_get_opp_count(dev);
+	struct dev_pm_opp *opp;
+	int i, index = 0;
+	unsigned int bandwidth = 1;
+
+	/*
+	 * The OPP table doesn't contain the "off" bandwidth level so we need to
+	 * add 1 to the table size to account for it
+	 */
+
+	if (WARN(count + 1 > size,
+		"The GMU bandwidth table is being truncated\n"))
+		count = size - 1;
+
+	/* Set the "off" bandwidth */
+	bandwidths[index++] = 0;
+
+	for (i = 0; i < count; i++) {
+		opp = dev_pm_opp_find_bw_ceil(dev, &bandwidth, 0);
+		if (IS_ERR(opp))
+			break;
+
+		dev_pm_opp_put(opp);
+		bandwidths[index++] = bandwidth++;
+	}
+
+	return index;
+}
+
 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	const struct a6xx_info *info = adreno_gpu->info->a6xx;
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	int ret = 0;
 
@@ -1377,10 +1825,88 @@
 
 	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
 
+	/*
+	 * The GMU also handles GPU Interconnect Votes so build a list
+	 * of DDR bandwidths from the GPU OPP table
+	 */
+	if (info->bcms)
+		gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev,
+			gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table));
+
 	/* Build the list of RPMh votes that we'll send to the GMU */
 	return a6xx_gmu_rpmh_votes_init(gmu);
 }
 
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	int ret, i, cmd_idx = 0;
+	extern bool disable_acd;
+
+	/* Skip ACD probe if requested via module param */
+	if (disable_acd) {
+		DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
+		return 0;
+	}
+
+	cmd->version = 1;
+	cmd->stride = 1;
+	cmd->enable_by_level = 0;
+
+	/* Skip freq = 0 and parse acd-level for rest of the OPPs */
+	for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+		struct dev_pm_opp *opp;
+		struct device_node *np;
+		unsigned long freq;
+		u32 val;
+
+		freq = gmu->gpu_freqs[i];
+		/* This is unlikely to fail because we are passing back a known freq */
+		opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+		np = dev_pm_opp_get_of_node(opp);
+
+		ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+		of_node_put(np);
+		dev_pm_opp_put(opp);
+		if (ret == -EINVAL)
+			continue;
+		else if (ret) {
+			DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
+			return ret;
+		}
+
+		cmd->enable_by_level |= BIT(i);
+		cmd->data[cmd_idx++] = val;
+	}
+
+	cmd->num_levels = cmd_idx;
+
+	/* It is a problem if qmp node is unavailable when ACD is required */
+	if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
+		DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
+		return -EINVAL;
+	}
+
+	/* Otherwise, nothing to do if qmp is unavailable */
+	if (IS_ERR_OR_NULL(gmu->qmp))
+		return 0;
+
+	/*
+	 * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is disabled on
+	 * system reset. So it is harmless if we couldn't notify 'OFF' state
+	 */
+	ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
+	if (ret && cmd->enable_by_level) {
+		DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
 {
 	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
@@ -1393,11 +1919,118 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
 
 	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
 		gmu->nr_clocks, "gmu");
+
+	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+		gmu->nr_clocks, "hub");
 
 	return 0;
 }
 
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+		const char *name, irq_handler_t handler)
+{
+	int irq, ret;
+
+	irq = platform_get_irq_byname(pdev, name);
+
+	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu);
+	if (ret) {
+		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
+			      name, ret);
+		return ret;
+	}
+
+	return irq;
+}
+
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	unsigned int sysprof_active;
+
+	/* Nothing to do if GPU is suspended. We will handle this during GMU resume */
+	if (!pm_runtime_get_if_active(&gpu->pdev->dev))
+		return;
+
+	mutex_lock(&gmu->lock);
+
+	sysprof_active = refcount_read(&gpu->sysprof_active);
+
+	/*
+	 * 'Perfcounter select' register values are lost during IFPC collapse. To avoid that,
+	 * use the currently unused perfcounter oob vote to block IFPC when sysprof is active
+	 */
+	if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+		a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+	else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+		a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
+	mutex_unlock(&gmu->lock);
+
+	pm_runtime_put(&gpu->pdev->dev);
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+	mutex_lock(&gmu->lock);
+	if (!gmu->initialized) {
+		mutex_unlock(&gmu->lock);
+		return;
+	}
+
+	gmu->initialized = false;
+
+	mutex_unlock(&gmu->lock);
+
+	pm_runtime_force_suspend(gmu->dev);
+
+	/*
+	 * Since cxpd is a virt device, the devlink with gmu-dev will be removed
+	 * automatically when we do detach
+	 */
+	dev_pm_domain_detach(gmu->cxpd, false);
+
+	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+		pm_runtime_disable(gmu->gxpd);
+		dev_pm_domain_detach(gmu->gxpd, false);
+	}
+
+	if (!IS_ERR_OR_NULL(gmu->qmp))
+		qmp_put(gmu->qmp);
+
+	iounmap(gmu->mmio);
+	gmu->mmio = NULL;
+	gmu->rscc = NULL;
+
+	if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+	    !adreno_has_rgmu(adreno_gpu)) {
+		a6xx_gmu_memory_free(gmu);
+
+		free_irq(gmu->gmu_irq, gmu);
+		free_irq(gmu->hfi_irq, gmu);
+	}
+
+	/* Drop reference taken in of_find_device_by_node */
+	put_device(gmu->dev);
+}
+
+static int cxpd_notifier_cb(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
+
+	if (action == GENPD_NOTIFY_OFF)
+		complete_all(&gmu->pd_gate);
+
+	return 0;
+}
+
 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
-		const char *name)
+		const char *name, resource_size_t *start)
 {
 	void __iomem *ret;
 	struct resource *res = platform_get_resource_byname(pdev,
@@ -1414,65 +2047,107 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (start)
+		*start = res->start;
+
 	return ret;
 }
 
-static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
-		const char *name, irq_handler_t handler)
+int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 {
-	int irq, ret;
+	struct platform_device *pdev = of_find_device_by_node(node);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	resource_size_t start;
+	struct resource *res;
+	int ret;
 
-	irq = platform_get_irq_byname(pdev, name);
+	if (!pdev)
+		return -ENODEV;
 
-	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
-	if (ret) {
-		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
-			name, ret);
+	gmu->dev = &pdev->dev;
+
+	ret = of_dma_configure(gmu->dev, node, true);
+	if (ret)
 		return ret;
+
+	pm_runtime_enable(gmu->dev);
+
+	/* Mark legacy for manual SPTPRAC control */
+	gmu->legacy = true;
+
+	/* RGMU requires clocks */
+	ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+	if (ret < 0)
+		goto err_clk;
+
+	gmu->nr_clocks = ret;
+
+	/* Map the GMU registers */
+	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
+	if (IS_ERR(gmu->mmio)) {
+		ret = PTR_ERR(gmu->mmio);
+		goto err_mmio;
 	}
 
-	disable_irq(irq);
+	res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+	if (!res) {
+		ret = -EINVAL;
+		goto err_mmio;
+	}
 
-	return irq;
-}
+	/* Identify gmu base offset from gpu base address */
+	gmu->mmio_offset = (u32)(start - res->start);
 
-void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
-{
-	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-	struct platform_device *pdev = to_platform_device(gmu->dev);
+	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
+	if (IS_ERR(gmu->cxpd)) {
+		ret = PTR_ERR(gmu->cxpd);
+		goto err_mmio;
+	}
 
-	if (!gmu->initialized)
-		return;
+	if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
+		ret = -ENODEV;
+		goto detach_cxpd;
+	}
 
-	pm_runtime_force_suspend(gmu->dev);
+	init_completion(&gmu->pd_gate);
+	complete_all(&gmu->pd_gate);
+	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
 
-	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
-		pm_runtime_disable(gmu->gxpd);
-		dev_pm_domain_detach(gmu->gxpd, false);
+	/* Get a link to the GX power domain to reset the GPU */
+	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+	if (IS_ERR(gmu->gxpd)) {
+		ret = PTR_ERR(gmu->gxpd);
+		goto err_mmio;
 	}
 
-	iounmap(gmu->mmio);
-	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
-		iounmap(gmu->rscc);
-	gmu->mmio = NULL;
-	gmu->rscc = NULL;
+	gmu->initialized = true;
 
-	a6xx_gmu_memory_free(gmu);
+	return 0;
 
-	free_irq(gmu->gmu_irq, gmu);
-	free_irq(gmu->hfi_irq, gmu);
+detach_cxpd:
+	dev_pm_domain_detach(gmu->cxpd, false);
+
+err_mmio:
+	iounmap(gmu->mmio);
+
+err_clk:
 	/* Drop reference taken in of_find_device_by_node */
 	put_device(gmu->dev);
 
-	gmu->initialized = false;
+	return ret;
 }
 
 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 {
+	struct platform_device *pdev = of_find_device_by_node(node);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-	struct platform_device *pdev = of_find_device_by_node(node);
+	struct device_link *link;
+	resource_size_t start;
+	struct resource *res;
 	int ret;
 
 	if (!pdev)
@@ -1480,10 +2155,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	gmu->dev = &pdev->dev;
 
-	of_dma_configure(gmu->dev, node, true);
+	ret = of_dma_configure(gmu->dev, node, true);
+	if (ret)
+		return ret;
 
-	/* Fow now, don't do anything fancy until we get our feet under us */
-	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+	/* Set GMU idle level */
+	gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ?
+			GMU_IDLE_STATE_IFPC : GMU_IDLE_STATE_ACTIVE;
 
 	pm_runtime_enable(gmu->dev);
 
@@ -1492,7 +2170,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	ret = a6xx_gmu_memory_probe(gmu);
+	ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
 	if (ret)
 		goto err_put_device;
 
@@ -1504,65 +2182,92 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	 * are otherwise unused by a660.
	 */
 	gmu->dummy.size = SZ_4K;
-	if (adreno_is_a660(adreno_gpu)) {
-		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+	if (adreno_is_a660_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu) ||
+	    adreno_is_a8xx(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+					    0x60400000, "debug");
 		if (ret)
 			goto err_memory;
 
-		gmu->dummy.size = SZ_8K;
+		gmu->dummy.size = SZ_16K;
 	}
 
 	/* Allocate memory for the GMU dummy page */
-	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+				    0x60000000, "dummy");
 	if (ret)
 		goto err_memory;
 
-	if (adreno_is_a650_family(adreno_gpu)) {
+	/* Note that a650 family also includes a660 family: */
+	if (adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu) ||
+	    adreno_is_a8xx(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
-			SZ_16M - SZ_16K, 0x04000);
+			SZ_16M - SZ_16K, 0x04000, "icache");
 		if (ret)
 			goto err_memory;
-	} else if (adreno_is_a640(adreno_gpu)) {
+	/*
+	 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
+	 * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
+	 * necessary. If you omit this step and you don't get random pagefaults, you are likely
+	 * good to go without this!
+	 */
+	} else if (adreno_is_a640_family(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
-			SZ_256K - SZ_16K, 0x04000);
+			SZ_256K - SZ_16K, 0x04000, "icache");
 		if (ret)
 			goto err_memory;
 
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
-			SZ_256K - SZ_16K, 0x44000);
+			SZ_256K - SZ_16K, 0x44000, "dcache");
 		if (ret)
 			goto err_memory;
-	} else {
+	} else if (adreno_is_a630_family(adreno_gpu)) {
 		/* HFI v1, has sptprac */
 		gmu->legacy = true;
 
 		/* Allocate memory for the GMU debug region */
-		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
 		if (ret)
 			goto err_memory;
 	}
 
-	/* Allocate memory for for the HFI queues */
-	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+	/* Allocate memory for the GMU log region */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
 	if (ret)
 		goto err_memory;
 
-	/* Allocate memory for the GMU log region */
-	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+	/* Allocate memory for for the HFI queues */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
 	if (ret)
 		goto err_memory;
 
 	/* Map the GMU registers */
-	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
 	if (IS_ERR(gmu->mmio)) {
 		ret = PTR_ERR(gmu->mmio);
 		goto err_memory;
 	}
 
-	if (adreno_is_a650_family(adreno_gpu)) {
-		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
-		if (IS_ERR(gmu->rscc))
+	res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+	if (!res) {
+		ret = -EINVAL;
+		goto err_mmio;
+	}
+
+	/* Identify gmu base offset from gpu base address */
+	gmu->mmio_offset = (u32)(start - res->start);
+
+	if (adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu)) {
+		gmu->rscc = devm_platform_ioremap_resource_byname(pdev, "rscc");
+		if (IS_ERR(gmu->rscc)) {
+			ret = -ENODEV;
 			goto err_mmio;
+		}
+	} else if (adreno_is_a8xx(adreno_gpu)) {
+		gmu->rscc = gmu->mmio + 0x19000;
 	} else {
 		gmu->rscc = gmu->mmio + 0x23000;
 	}
@@ -1571,8 +2276,33 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
 	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
 
-	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
+		ret = -ENODEV;
 		goto err_mmio;
+	}
+
+	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
+	if (IS_ERR(gmu->cxpd)) {
+		ret = PTR_ERR(gmu->cxpd);
+		goto err_mmio;
+	}
+
+	link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME);
+	if (!link) {
+		ret = -ENODEV;
+		goto detach_cxpd;
+	}
+
+	/* Other errors are handled during GPU ACD probe */
+	gmu->qmp = qmp_get(gmu->dev);
+	if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto detach_gxpd;
+	}
+
+	init_completion(&gmu->pd_gate);
+	complete_all(&gmu->pd_gate);
+	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
 
 	/*
 	 * Get a link to the GX power domain to reset the GPU in case of GMU
@@ -1583,22 +2313,37 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	/* Get the power levels for the GMU and GPU */
 	a6xx_gmu_pwrlevels_probe(gmu);
 
+	ret = a6xx_gmu_acd_probe(gmu);
+	if (ret)
+		goto detach_gxpd;
+
 	/* Set up the HFI queues */
 	a6xx_hfi_init(gmu);
 
+	/* Initialize RPMh */
+	a6xx_gmu_rpmh_init(gmu);
+
 	gmu->initialized = true;
 
 	return 0;
 
+detach_gxpd:
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
+		dev_pm_domain_detach(gmu->gxpd, false);
+
+	if (!IS_ERR_OR_NULL(gmu->qmp))
+		qmp_put(gmu->qmp);
+
+	device_link_del(link);
+
+detach_cxpd:
+	dev_pm_domain_detach(gmu->cxpd, false);
+
 err_mmio:
 	iounmap(gmu->mmio);
-	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
-		iounmap(gmu->rscc);
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
 
-	ret = -ENODEV;
-
 err_memory:
 	a6xx_gmu_memory_free(gmu);
 err_put_device:
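
A note on the AB ("absolute bandwidth") vote introduced in a6xx_gmu_set_freq() above: the vote is a 16 bit level quantized against the top entry of the GPU bandwidth table, and on A750-family parts the driver votes 25% of the requested bandwidth. A minimal standalone sketch of that arithmetic, not taken from the patch itself: the 16 bit width and the 25% fraction mirror the patch, the AB_VOTE()/AB_VOTE_ENABLE packing is omitted, and the sample numbers are made up.

#include <stdint.h>
#include <stdio.h>

#define MAX_AB_VOTE	0xffffu	/* 16 bit quantized level, as in the patch */

static uint32_t ab_quantize(uint64_t bw_kbps, uint64_t max_bw_kbps)
{
	/* For now, vote for 25% of the requested bandwidth */
	uint64_t tmp = bw_kbps * 25 / 100;

	/* vote = (bandwidth * 2^16) / max bandwidth */
	tmp = tmp * MAX_AB_VOTE / max_bw_kbps;

	/* clamp(tmp, 1, MAX_AB_VOTE) */
	if (tmp < 1)
		tmp = 1;
	if (tmp > MAX_AB_VOTE)
		tmp = MAX_AB_VOTE;

	return (uint32_t)tmp;
}

int main(void)
{
	/* e.g. 6 GB/s requested against a 16 GB/s table maximum */
	printf("AB vote level = 0x%x\n",
	       (unsigned int)ab_quantize(6000000, 16000000));
	return 0;
}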
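The GMU chip_id "massaging" in a6xx_gmu_fw_start() is easier to see in isolation. Per the patch's own comment, the upper 16 bits pass through unchanged while the 8 bit minor and patchid fields are repacked into four bits each, leaving the low 8 bits unused; this sketch just restates the three lines from the patch as a helper:

#include <stdint.h>

static uint32_t gmu_chipid(uint32_t chip_id)
{
	uint32_t chipid;

	chipid  = chip_id & 0xffff0000;		/* core + major: unchanged */
	chipid |= (chip_id << 4) & 0xf000;	/* minor: bits 11:8 -> 15:12 */
	chipid |= (chip_id << 8) & 0x0f00;	/* patchid: bits 3:0 -> 11:8 */

	return chipid;
}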
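The per-BCM vote generation in a6xx_gmu_rpmh_bw_votes_init() is, as its comment says, loosely copied from the interconnect framework's bcm_aggregate()/tcs_cmd_gen(). A sketch of the core scaling for a non-fixed BCM, under the assumption that width and unit come from the BCM's cmd-db aux data and buswidth from the driver's BCM table (the BCM_TCS_CMD() command packing from <soc/qcom/tcs.h> is left out):

#include <stdint.h>

#define BCM_TCS_CMD_VOTE_MASK	0x3fff

static uint32_t bcm_vote_level(uint64_t bw_kbps, uint16_t width,
			       uint32_t buswidth, uint32_t unit)
{
	/* Multiply the bandwidth by the width of the connection */
	uint64_t peak = bw_kbps * width / buswidth;

	/* Input bandwidth value is in KBps, scale the value to BCM unit */
	peak = peak * 1000 / unit;

	/* clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK) */
	if (peak < 1)
		peak = 1;
	if (peak > BCM_TCS_CMD_VOTE_MASK)
		peak = BCM_TCS_CMD_VOTE_MASK;

	return (uint32_t)peak;
}

On A7xx and A8xx the resulting level is voted on both the x and y fields of the TCS command; older parts vote only y, exactly as the patch's final if/else shows.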
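Finally, the dependency-rail votes built by the new a6xx_gmu_rpmh_dep_votes_init() pack the index of the matching mx.lvl cmd-db entry into a fixed-layout word. The field meaning is inferred from the patch's vote construction, (0x3fff << 14) | (index << 8) | (0xff), and is not documented there, so treat this as an assumption:

#include <stdint.h>

static uint32_t dep_vote(uint8_t index)
{
	/* matching mx.lvl index in bits 13:8; surrounding fields forced high */
	return (0x3fffu << 14) | ((uint32_t)index << 8) | 0xffu;
}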
