Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gmu.c	1905
1 file changed, 1501 insertions(+), 404 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 5beb83d1cf87..5903cd891b49 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1,12 +1,38 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/interconnect.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/pm_opp.h> #include <soc/qcom/cmd-db.h> +#include <soc/qcom/tcs.h> +#include <drm/drm_gem.h> #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" +#include "msm_gem.h" +#include "msm_gpu_trace.h" +#include "msm_mmu.h" + +static void a6xx_gmu_fault(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + + /* FIXME: add a banner here */ + gmu->hung = true; + + /* Turn off the hangcheck timer while we are resetting */ + timer_delete(&gpu->hangcheck_timer); + + /* Queue the GPU handler because we need to treat this as a recovery */ + kthread_queue_work(gpu->worker, &gpu->recover_work); +} static irqreturn_t a6xx_gmu_irq(int irq, void *data) { @@ -19,8 +45,7 @@ static irqreturn_t a6xx_gmu_irq(int irq, void *data) if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) { dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); - /* Temporary until we can recover safely */ - BUG(); + a6xx_gmu_fault(gmu); } if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR) @@ -44,8 +69,7 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data) if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); - /* Temporary until we can recover safely */ - BUG(); + a6xx_gmu_fault(gmu); } return IRQ_HANDLED; @@ -56,7 +80,7 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) u32 val; /* This can be called from gpu state code so make sure GMU is valid */ - if (IS_ERR_OR_NULL(gmu->mmio)) + if (!gmu->initialized) return false; val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); @@ -69,27 +93,108 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) /* Check to see if the GX rail is still powered */ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; u32 val; /* This can be called from gpu state code so make sure GMU is valid */ - if (IS_ERR_OR_NULL(gmu->mmio)) + if (!gmu->initialized) return false; + /* If GMU is absent, then GX power domain is ON as long as GPU is in active state */ + if (adreno_has_gmu_wrapper(adreno_gpu)) + return true; + val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); + if (adreno_is_a7xx(adreno_gpu)) + return !(val & + (A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | + A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); + return !(val & (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); } -static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) +void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, + bool suspended) { - int ret; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct a6xx_info *info = adreno_gpu->info->a6xx; + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + 
struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 perf_index; + u32 bw_index = 0; + unsigned long gpu_freq; + int ret = 0; + + gpu_freq = dev_pm_opp_get_freq(opp); + + if (gpu_freq == gmu->freq) + return; + + for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) + if (gpu_freq == gmu->gpu_freqs[perf_index]) + break; + + /* If enabled, find the corresponding DDR bandwidth index */ + if (info->bcms && gmu->nr_gpu_bws > 1) { + unsigned int bw = dev_pm_opp_get_bw(opp, true, 0); + + for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) { + if (bw == gmu->gpu_bw_table[bw_index]) + break; + } + + /* Vote AB as a fraction of the max bandwidth, starting from A750 */ + if (bw && adreno_is_a750_family(adreno_gpu)) { + u64 tmp; + + /* For now, vote for 25% of the bandwidth */ + tmp = bw * 25; + do_div(tmp, 100); + + /* + * The AB vote consists of a 16 bit wide quantized level + * against the maximum supported bandwidth. + * Quantization can be calculated as below: + * vote = (bandwidth * 2^16) / max bandwidth + */ + tmp *= MAX_AB_VOTE; + do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]); + + bw_index |= AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE)); + bw_index |= AB_VOTE_ENABLE; + } + } + + gmu->current_perf_index = perf_index; + gmu->freq = gmu->gpu_freqs[perf_index]; + + trace_msm_gmu_freq_change(gmu->freq, perf_index); + + /* + * This can get called from devfreq while the hardware is idle. Don't + * bring up the power if it isn't already active. All we're doing here + * is updating the frequency so that when we come back online we're at + * the right rate. + */ + if (suspended) + return; + + if (!gmu->legacy) { + a6xx_hfi_set_freq(gmu, perf_index, bw_index); + /* With Bandwidth voting, we now vote for all resources, so skip OPP set */ + if (!bw_index) + dev_pm_opp_set_opp(&gpu->pdev->dev, opp); + return; + } gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, - ((3 & 0xf) << 28) | index); + ((3 & 0xf) << 28) | perf_index); /* * Send an invalid index as a vote for the bus bandwidth and let the @@ -105,24 +210,7 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) if (ret) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); - gmu->freq = gmu->gpu_freqs[index]; -} - -void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - u32 perf_index = 0; - - if (freq == gmu->freq) - return; - - for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) - if (freq == gmu->gpu_freqs[perf_index]) - break; - - __a6xx_gmu_set_freq(gmu, perf_index); + dev_pm_opp_set_opp(&gpu->pdev->dev, opp); } unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) @@ -136,14 +224,19 @@ unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) { - u32 val; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int local = gmu->idle_level; + u32 val; /* SPTP and IFPC both report as IFPC */ if (gmu->idle_level == GMU_IDLE_STATE_SPTP) local = GMU_IDLE_STATE_IFPC; - val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + if (adreno_is_a8xx(adreno_gpu)) + val = gmu_read(gmu, REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + else + val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); if (val == local) { if (gmu->idle_level != GMU_IDLE_STATE_IFPC || @@ -155,27 
+248,50 @@ static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) } /* Wait for the GMU to get to its most idle state */ -int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu) +int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) { - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - return spin_until(a6xx_gmu_check_idle_level(gmu)); } static int a6xx_gmu_start(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + u32 mask, reset_val, val; int ret; - u32 val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); + + /* Set the log wptr index + * note: downstream saves the value in poweroff and restores it here + */ + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A8XX_GMU_GENERAL_9, 0); + else if (adreno_is_a7xx(adreno_gpu)) + gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0); + else + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); + + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); + set_bit(GMU_STATUS_FW_START, &gmu->status); + return ret; } @@ -194,45 +310,107 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) return ret; } +struct a6xx_gmu_oob_bits { + int set, ack, set_new, ack_new, clear, clear_new; + const char *name; +}; + +/* These are the interrupt / ack bits for each OOB request that are set + * in a6xx_gmu_set_oob and a6xx_clear_oob + */ +static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = { + [GMU_OOB_GPU_SET] = { + .name = "GPU_SET", + .set = 16, + .ack = 24, + .set_new = 30, + .ack_new = 31, + .clear = 24, + .clear_new = 31, + }, + + [GMU_OOB_PERFCOUNTER_SET] = { + .name = "PERFCOUNTER", + .set = 17, + .ack = 25, + .set_new = 28, + .ack_new = 30, + .clear = 25, + .clear_new = 29, + }, + + [GMU_OOB_BOOT_SLUMBER] = { + .name = "BOOT_SLUMBER", + .set = 22, + .ack = 30, + .clear = 30, + }, + + [GMU_OOB_DCVS_SET] = { + .name = "GPU_DCVS", + .set = 23, + .ack = 31, + .clear = 31, + }, +}; + /* Trigger a OOB (out of band) request to the GMU */ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int ret; u32 val; int request, ack; - const char *name; - switch (state) { - case GMU_OOB_GPU_SET: - request = GMU_OOB_GPU_SET_REQUEST; - ack = GMU_OOB_GPU_SET_ACK; - name = "GPU_SET"; - break; - case GMU_OOB_BOOT_SLUMBER: - request = GMU_OOB_BOOT_SLUMBER_REQUEST; - ack = GMU_OOB_BOOT_SLUMBER_ACK; - name = "BOOT_SLUMBER"; - break; - case GMU_OOB_DCVS_SET: - request = GMU_OOB_DCVS_REQUEST; - ack = GMU_OOB_DCVS_ACK; - name = "GPU_DCVS"; - break; - default: + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + + /* Skip OOB calls since RGMU is not enabled */ + if (adreno_has_rgmu(adreno_gpu)) + return 0; + + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return -EINVAL; + + if (gmu->legacy) { + request = a6xx_gmu_oob_bits[state].set; + ack = a6xx_gmu_oob_bits[state].ack; + } else { + request = a6xx_gmu_oob_bits[state].set_new; + ack = a6xx_gmu_oob_bits[state].ack_new; + if (!request || !ack) { + DRM_DEV_ERROR(gmu->dev, + "Invalid non-legacy GMU request %s\n", + 
a6xx_gmu_oob_bits[state].name); + return -EINVAL; + } } /* Trigger the equested OOB operation */ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); - /* Wait for the acknowledge interrupt */ - ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, - val & (1 << ack), 100, 10000); + do { + /* Wait for the acknowledge interrupt */ + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, + val & (1 << ack), 100, 10000); + + if (!ret) + break; + + if (completion_done(&a6xx_gpu->base.fault_coredump_done)) + break; + + /* We may timeout because the GMU is temporarily wedged from + * pending faults from the GPU and we are taking a devcoredump. + * Wait until the MMU is resumed and try again. + */ + wait_for_completion(&a6xx_gpu->base.fault_coredump_done); + } while (true); if (ret) DRM_DEV_ERROR(gmu->dev, "Timeout waiting for GMU OOB set %s: 0x%x\n", - name, + a6xx_gmu_oob_bits[state].name, gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); /* Clear the acknowledge interrupt */ @@ -244,28 +422,39 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) /* Clear a pending OOB state in the GMU */ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { - switch (state) { - case GMU_OOB_GPU_SET: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_GPU_SET_CLEAR); - break; - case GMU_OOB_BOOT_SLUMBER: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_BOOT_SLUMBER_CLEAR); - break; - case GMU_OOB_DCVS_SET: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_DCVS_CLEAR); - break; - } + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + int bit; + + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + + /* Skip OOB calls since RGMU is not enabled */ + if (adreno_has_rgmu(adreno_gpu)) + return; + + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) + return; + + if (gmu->legacy) + bit = a6xx_gmu_oob_bits[state].clear; + else + bit = a6xx_gmu_oob_bits[state].clear_new; + + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); } /* Enable CPU control of SPTP power power collapse */ -static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) +int a6xx_sptprac_enable(struct a6xx_gmu *gmu) { int ret; u32 val; + WARN_ON(!gmu->legacy); + + /* Nothing to do if GMU does the power management */ + if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE) + return 0; + gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, @@ -280,11 +469,14 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) } /* Disable CPU control of SPTP power power collapse */ -static void a6xx_sptprac_disable(struct a6xx_gmu *gmu) +void a6xx_sptprac_disable(struct a6xx_gmu *gmu) { u32 val; int ret; + if (!gmu->legacy) + return; + /* Make sure retention is on */ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); @@ -316,18 +508,45 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); } +static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + + /* + * GEMNoC can power collapse whilst the GPU is being powered down, resulting + * in the power down sequence not being fully executed. That in turn can + * prevent CX_GDSC from collapsing. Assert Qactive to avoid this. 
+ */ + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0)); + else if (adreno_is_a7xx(adreno_gpu) || (adreno_is_a621(adreno_gpu) || + adreno_is_7c3(adreno_gpu))) + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0)); +} + /* Let the GMU know that we are about to go into slumber */ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int ret; /* Disable the power counter so the GMU isn't busy */ - gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); + else + gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); /* Disable SPTP_PC if the CPU is responsible for it */ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) a6xx_sptprac_disable(gmu); + if (!gmu->legacy) { + ret = a6xx_hfi_send_prep_slumber(gmu); + goto out; + } + /* Tell the GMU to get ready to slumber */ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); @@ -343,8 +562,10 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) } } +out: /* Put fence into allow mode */ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + a6xx_gemnoc_workaround(gmu); return ret; } @@ -353,9 +574,10 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) int ret; u32 val; - gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); - /* Wait for the register to finish posting */ - wmb(); + if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status)) + return 0; + + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1)); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, val & (1 << 1), 100, 10000); @@ -364,7 +586,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) return ret; } - ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, !val, 100, 10000); if (ret) { @@ -374,69 +596,114 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); - /* Set up CX GMU counter 0 to count busy ticks */ - gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); - gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20); - - /* Enable the power counter */ - gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); return 0; } static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + u32 bitmask = BIT(16); int ret; u32 val; + if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status)) + return; + + if (adreno_is_a840(adreno_gpu)) + bitmask = BIT(30); + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); - ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, - val, val & (1 << 16), 100, 10000); + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, + val, val & bitmask, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); + + set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status); } static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) { - return msm_writel(value, ptr + (offset << 2)); + writel(value, ptr + (offset << 2)); } -static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, - const char *name); - static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = 
container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct platform_device *pdev = to_platform_device(gmu->dev); - void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); - void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); + u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0; + void __iomem *seqptr = NULL; + uint32_t pdc_address_offset; + void __iomem *pdcptr; + bool pdc_in_aop = false; + + /* On A8x and above, RPMH/PDC configurations are entirely configured in AOP */ + if (adreno_is_a8xx(adreno_gpu)) + return; - if (!pdcptr || !seqptr) - goto err; + pdcptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc"); + if (IS_ERR(pdcptr)) + return; + + if (adreno_is_a650_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu)) + pdc_in_aop = true; + else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu)) + pdc_address_offset = 0x30090; + else if (adreno_is_a619(adreno_gpu)) + pdc_address_offset = 0x300a0; + else + pdc_address_offset = 0x30080; + + if (!pdc_in_aop) { + seqptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc_seq"); + if (IS_ERR(seqptr)) + return; + } /* Disable SDE clock gating */ - gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); + gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); /* Setup RSC PDC handshake for sleep and wakeup */ - gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); - gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); - gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); - gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); - gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, + adreno_is_a740_family(adreno_gpu) ? 0x80000021 : 0x80000000); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); + + /* The second spin of A7xx GPUs messed with some register offsets.. 
*/ + if (adreno_is_a740_family(adreno_gpu)) + seqmem0_drv0_reg = REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740; /* Load RSC sequencer uCode for sleep and wakeup */ - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); + if (adreno_is_a650_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu)) { + gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0); + gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab); + gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581); + gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2); + gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad); + } else { + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); + } + + if (pdc_in_aop) + goto setup_pdc; /* Load PDC sequencer uCode for power up and power down sequence */ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); @@ -455,32 +722,36 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); + if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) || + adreno_is_a650_family(adreno_gpu)) + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); + else + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); /* Setup GPU PDC */ +setup_pdc: pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); /* ensure no writes happen before the uCode is fully written */ wmb(); - -err: - devm_iounmap(gmu->dev, pdcptr); - devm_iounmap(gmu->dev, seqptr); } /* @@ -494,8 +765,17 @@ err: /* Set up the idle state for the GMU */ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + /* Disable GMU WB/RB buffer */ gmu_write(gmu, 
REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); + gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); + gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); + + /* A7xx knows better by default! */ + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) + return; gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); @@ -506,7 +786,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); - /* Fall through */ + fallthrough; case GMU_IDLE_STATE_SPTP: gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, GMU_PWR_COL_HYST); @@ -525,67 +805,201 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE); } -static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) +struct block_header { + u32 addr; + u32 size; + u32 type; + u32 value; + u32 data[]; +}; + +static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) +{ + if (!in_range(blk->addr, bo->iova, bo->size)) + return false; + + memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); + return true; +} + +#define NEXT_BLK(blk) \ + ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) + +static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) { - static bool rpmh_init; struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - int i, ret; - u32 chipid; - u32 *image; + const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; + const struct block_header *blk; + u32 reg_offset; + u32 ver; - if (state == GMU_WARM_BOOT) { - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; - } else { - if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], - "GMU firmware is not loaded\n")) - return -ENOENT; + u32 itcm_base = 0x00000000; + u32 dtcm_base = 0x00040000; + + if (adreno_is_a650_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu) || + adreno_is_a8xx(adreno_gpu)) + dtcm_base = 0x10004000; + if (gmu->legacy) { /* Sanity check the size of the firmware that was loaded */ - if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) { + if (fw_image->size > 0x8000) { DRM_DEV_ERROR(gmu->dev, "GMU firmware is bigger than the available region\n"); return -EINVAL; } - /* Turn on register retention */ - gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); + gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, + (u32*) fw_image->data, fw_image->size); + return 0; + } - /* We only need to load the RPMh microcode once */ - if (!rpmh_init) { - a6xx_gmu_rpmh_init(gmu); - rpmh_init = true; - } else if (state != GMU_RESET) { - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; + + for (blk = (const struct block_header *) fw_image->data; + (const u8*) blk < fw_image->data + fw_image->size; + blk = NEXT_BLK(blk)) { + if (blk->size == 0) + continue; + + if (in_range(blk->addr, itcm_base, SZ_16K)) { + reg_offset = (blk->addr - itcm_base) >> 2; + gmu_write_bulk(gmu, + REG_A6XX_GMU_CM3_ITCM_START + reg_offset, + blk->data, blk->size); + } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { + reg_offset = (blk->addr - dtcm_base) >> 2; + gmu_write_bulk(gmu, + REG_A6XX_GMU_CM3_DTCM_START + reg_offset, + blk->data, blk->size); + } else if (!fw_block_mem(&gmu->icache, blk) && + !fw_block_mem(&gmu->dcache, blk) && + !fw_block_mem(&gmu->dummy, blk)) { + DRM_DEV_ERROR(gmu->dev, + "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n", + blk->addr, blk->size, blk->data[0]); } + } + + 
ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION); + DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n", + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver), + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver), + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver)); + + return 0; +} + +static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx; + const struct adreno_reglist *gbif_cx = a6xx_info->gbif_cx; + u32 fence_range_lower, fence_range_upper; + u32 chipid = 0; + int ret; + + /* Vote veto for FAL10 */ + if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) { + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); + } else if (adreno_is_a8xx(adreno_gpu)) { + gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); + gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); + } + + /* Turn on TCM (Tightly Coupled Memory) retention */ + if (adreno_is_a7xx(adreno_gpu)) + a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1); + else if (!adreno_is_a8xx(adreno_gpu)) + gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); + + ret = a6xx_rpmh_start(gmu); + if (ret) + return ret; - image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data; + if (state == GMU_COLD_BOOT) { + if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], + "GMU firmware is not loaded\n")) + return -ENOENT; - for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++) - gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i, - image[i]); + ret = a6xx_gmu_fw_load(gmu); + if (ret) + return ret; } + /* Clear init result to make sure we are getting a fresh value */ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); /* Write the iova of the HFI table */ - gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova); + gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); + if (adreno_is_a8xx(adreno_gpu)) { + fence_range_upper = 0x32; + fence_range_lower = 0x8c0; + } else if (adreno_is_a7xx(adreno_gpu)) { + fence_range_upper = 0x32; + fence_range_lower = 0x8a0; + } else { + fence_range_upper = 0xa; + fence_range_lower = 0xa0; + } + gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, - (1 << 31) | (0xa << 18) | (0xa0)); + BIT(31) | + FIELD_PREP(GENMASK(30, 18), fence_range_upper) | + FIELD_PREP(GENMASK(17, 0), fence_range_lower)); + + /* + * Snapshots toggle the NMI bit which will result in a jump to the NMI + * handler instead of __main. Set the M3 config value to avoid that. + */ + gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052); + + if (a6xx_info->gmu_chipid) { + chipid = a6xx_info->gmu_chipid; + } else { + /* + * Note that the GMU has a slightly different layout for + * chip_id, for whatever reason, so a bit of massaging + * is needed. 
The upper 16b are the same, but minor and + * patchid are packed in four bits each with the lower + * 8b unused: + */ + chipid = adreno_gpu->chip_id & 0xffff0000; + chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */ + chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */ + } + + if (adreno_is_a8xx(adreno_gpu)) { + gmu_write(gmu, REG_A8XX_GMU_GENERAL_10, chipid); + gmu_write(gmu, REG_A8XX_GMU_GENERAL_8, + (gmu->log.iova & GENMASK(31, 12)) | + ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); + } else if (adreno_is_a7xx(adreno_gpu)) { + gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid); + gmu_write(gmu, REG_A7XX_GMU_GENERAL_8, + (gmu->log.iova & GENMASK(31, 12)) | + ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); + } else { + gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); + + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, + gmu->log.iova | (gmu->log.size / SZ_4K - 1)); + } - chipid = adreno_gpu->rev.core << 24; - chipid |= adreno_gpu->rev.major << 16; - chipid |= adreno_gpu->rev.minor << 12; - chipid |= adreno_gpu->rev.patchid << 8; + /* For A7x and newer, do the CX GBIF configurations before GMU wake up */ + for (int i = 0; (gbif_cx && gbif_cx[i].offset); i++) + gpu_write(gpu, gbif_cx[i].offset, gbif_cx[i].value); - gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); + if (adreno_is_a8xx(adreno_gpu)) { + gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000); + gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33); + } /* Set up the lowest idle level on the GMU */ a6xx_gmu_power_config(gmu); @@ -594,12 +1008,11 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) if (ret) return ret; - ret = a6xx_gmu_gfx_rail_on(gmu); - if (ret) - return ret; + if (gmu->legacy) { + ret = a6xx_gmu_gfx_rail_on(gmu); + if (ret) + return ret; - /* Enable SPTP_PC if the CPU is responsible for it */ - if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { ret = a6xx_sptprac_enable(gmu); if (ret) return ret; @@ -623,20 +1036,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) -static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu) -{ - gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); - gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); - - gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, - ~A6XX_GMU_IRQ_MASK); - gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, - ~A6XX_HFI_IRQ_MASK); - - enable_irq(gmu->gmu_irq); - enable_irq(gmu->hfi_irq); -} - static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) { disable_irq(gmu->gmu_irq); @@ -646,11 +1045,58 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); } -int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu) +static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) { - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - int ret; - u32 val; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + u32 val, seqmem_off = 0; + + /* The second spin of A7xx GPUs messed with some register offsets.. 
*/ + if (adreno_is_a740_family(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) + seqmem_off = 4; + + /* Make sure there are no outstanding RPMh votes */ + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 1000); + + if (!adreno_is_a740_family(adreno_gpu) && !adreno_is_a8xx(adreno_gpu)) + return; + + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS5_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS6_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS7_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 1000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS8_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS9_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 1000); +} + +/* Force the GMU off in case it isn't responsive */ +static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + + /* + * Turn off keep alive that might have been enabled by the hang + * interrupt + */ + if (adreno_is_a8xx(adreno_gpu)) + gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); + else + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); /* Flush all the queues */ a6xx_hfi_stop(gmu); @@ -661,84 +1107,148 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu) /* Force off SPTP in case the GMU is managing it */ a6xx_sptprac_disable(gmu); + a6xx_gemnoc_workaround(gmu); + /* Make sure there are no outstanding RPMh votes */ - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, - (val & 1), 100, 10000); - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, - (val & 1), 100, 10000); - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, - (val & 1), 100, 10000); - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, - (val & 1), 100, 1000); - - /* Force off the GX GSDC */ - regulator_force_disable(gmu->gx); - - /* Disable the resources */ - clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); - pm_runtime_put_sync(gmu->dev); + a6xx_gmu_rpmh_off(gmu); - /* Re-enable the resources */ - pm_runtime_get_sync(gmu->dev); + /* Clear the WRITEDROPPED fields and put fence into allow mode */ + gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7); + gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); - /* Use a known rate to bring up the GMU */ - clk_set_rate(gmu->core_clk, 200000000); - ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); - if (ret) - goto out; + /* Make sure the above writes go through */ + wmb(); - a6xx_gmu_irq_enable(gmu); + /* Halt the gmu cm3 core */ + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); - ret = a6xx_gmu_fw_start(gmu, GMU_RESET); - if (!ret) - ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT); + adreno_gpu->funcs->bus_halt(adreno_gpu, true); - /* Set the GPU back to the highest power frequency */ - __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); + /* Reset GPU core blocks */ + 
a6xx_gpu_sw_reset(gpu, true); -out: - if (ret) - a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); + a6xx_rpmh_stop(gmu); +} - return ret; +static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +{ + struct dev_pm_opp *gpu_opp; + unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; + + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); + if (IS_ERR(gpu_opp)) + return; + + gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ + a6xx_gmu_set_freq(gpu, gpu_opp, false); + dev_pm_opp_put(gpu_opp); +} + +static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +{ + struct dev_pm_opp *gpu_opp; + unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; + + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); + if (IS_ERR(gpu_opp)) + return; + + dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); + dev_pm_opp_put(gpu_opp); } int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; int status, ret; - if (WARN(!gmu->mmio, "The GMU is not set up yet\n")) - return 0; + if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) + return -EINVAL; + + gmu->hung = false; /* Turn on the resources */ pm_runtime_get_sync(gmu->dev); + /* + * "enable" the GX power domain which won't actually do anything but it + * will make sure that the refcounting is correct in case we need to + * bring down the GX after a GMU failure + */ + if (!IS_ERR_OR_NULL(gmu->gxpd)) + pm_runtime_get_sync(gmu->gxpd); + /* Use a known rate to bring up the GMU */ clk_set_rate(gmu->core_clk, 200000000); + clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? + 200000000 : 150000000); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); - if (ret) - goto out; + if (ret) { + pm_runtime_put(gmu->gxpd); + pm_runtime_put(gmu->dev); + return ret; + } + + /* Read the slice info on A8x GPUs */ + a8xx_gpu_get_slice_info(gpu); - a6xx_gmu_irq_enable(gmu); + /* Set the bus quota to a reasonable value for boot */ + a6xx_gmu_set_initial_bw(gpu, gmu); + + /* Enable the GMU interrupt */ + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); + enable_irq(gmu->gmu_irq); /* Check to see if we are doing a cold or warm boot */ - status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? - GMU_WARM_BOOT : GMU_COLD_BOOT; + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) { + status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ? + GMU_WARM_BOOT : GMU_COLD_BOOT; + } else if (gmu->legacy) { + status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? 
+ GMU_WARM_BOOT : GMU_COLD_BOOT; + } else { + /* + * Warm boot path does not work on newer A6xx GPUs + * Presumably this is because icache/dcache regions must be restored + */ + status = GMU_COLD_BOOT; + } ret = a6xx_gmu_fw_start(gmu, status); if (ret) goto out; ret = a6xx_hfi_start(gmu, status); + if (ret) + goto out; + + /* + * Turn on the GMU firmware fault interrupt after we know the boot + * sequence is successful + */ + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); + enable_irq(gmu->hfi_irq); - /* Set the GPU to the highest power frequency */ - __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); + /* Set the GPU to the current freq */ + a6xx_gmu_set_initial_freq(gpu, gmu); + if (refcount_read(&gpu->sysprof_active) > 1) { + ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + if (!ret) + set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status); + } out: - /* Make sure to turn off the boot OOB request on error */ - if (ret) - a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); + /* On failure, shut down the GMU to leave it in a good state */ + if (ret) { + disable_irq(gmu->gmu_irq); + a6xx_rpmh_stop(gmu); + pm_runtime_put(gmu->gxpd); + pm_runtime_put(gmu->dev); + } return ret; } @@ -747,7 +1257,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) { u32 reg; - if (!gmu->mmio) + if (!gmu->initialized) return true; reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); @@ -758,44 +1268,59 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) return true; } -int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) +/* Gracefully try to shut down the GMU and by extension the GPU */ +static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) { - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; u32 val; + int ret; /* - * The GMU may still be in slumber unless the GPU started so check and - * skip putting it back into slumber if so + * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when + * oob_gpu handshake wasn't done after the last wake up. 
So do a dummy handshake here when + * required */ - val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + if (adreno_gpu->base.needs_hw_init) { + if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) + goto force_off; - if (val != 0xf) { - int ret = a6xx_gmu_wait_for_idle(a6xx_gpu); + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + } - /* Temporary until we can recover safely */ - BUG_ON(ret); + if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) + a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET); - /* tell the GMU we want to slumber */ - a6xx_gmu_notify_slumber(gmu); + ret = a6xx_gmu_wait_for_idle(gmu); - ret = gmu_poll_timeout(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, - !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), - 100, 10000); + /* If the GMU isn't responding assume it is hung */ + if (ret) + goto force_off; - /* - * Let the user know we failed to slumber but don't worry too - * much because we are powering down anyway - */ + adreno_gpu->funcs->bus_halt(adreno_gpu, a6xx_gpu->hung); - if (ret) - DRM_DEV_ERROR(gmu->dev, - "Unable to slumber GMU: status = 0%x/0%x\n", - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); - } + /* tell the GMU we want to slumber */ + ret = a6xx_gmu_notify_slumber(gmu); + if (ret) + goto force_off; + + ret = gmu_poll_timeout(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, + !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), + 100, 10000); + + /* + * Let the user know we failed to slumber but don't worry too + * much because we are powering down anyway + */ + + if (ret) + DRM_DEV_ERROR(gmu->dev, + "Unable to slumber GMU: status = 0%x/0%x\n", + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); /* Turn off HFI */ a6xx_hfi_stop(gmu); @@ -806,147 +1331,232 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); - clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); - - pm_runtime_put_sync(gmu->dev); + return; - return 0; +force_off: + a6xx_gmu_force_off(gmu); } -static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) + +int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) { - int count, i; - u64 iova; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + struct msm_gpu *gpu = &a6xx_gpu->base.base; - if (IS_ERR_OR_NULL(bo)) - return; + if (!pm_runtime_active(gmu->dev)) + return 0; + + /* + * Force the GMU off if we detected a hang, otherwise try to shut it + * down gracefully + */ + if (gmu->hung) + a6xx_gmu_force_off(gmu); + else + a6xx_gmu_shutdown(gmu); - count = bo->size >> PAGE_SHIFT; - iova = bo->iova; + /* Remove the bus vote */ + dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); - for (i = 0; i < count; i++, iova += PAGE_SIZE) { - iommu_unmap(gmu->domain, iova, PAGE_SIZE); - __free_pages(bo->pages[i], 0); - } + /* + * Make sure the GX domain is off before turning off the GMU (CX) + * domain. 
Usually the GMU does this but only if the shutdown sequence + * was successful + */ + if (!IS_ERR_OR_NULL(gmu->gxpd)) + pm_runtime_put_sync(gmu->gxpd); + + clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); + + pm_runtime_put_sync(gmu->dev); - kfree(bo->pages); - kfree(bo); + return 0; } -static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, - size_t size) +static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) { - struct a6xx_gmu_bo *bo; - int ret, count, i; + struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu; - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return ERR_PTR(-ENOMEM); + msm_gem_kernel_put(gmu->hfi.obj, gmu->vm); + msm_gem_kernel_put(gmu->debug.obj, gmu->vm); + msm_gem_kernel_put(gmu->icache.obj, gmu->vm); + msm_gem_kernel_put(gmu->dcache.obj, gmu->vm); + msm_gem_kernel_put(gmu->dummy.obj, gmu->vm); + msm_gem_kernel_put(gmu->log.obj, gmu->vm); - bo->size = PAGE_ALIGN(size); + mmu->funcs->detach(mmu); + drm_gpuvm_put(gmu->vm); +} - count = bo->size >> PAGE_SHIFT; +static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, + size_t size, u64 iova, const char *name) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct drm_device *dev = a6xx_gpu->base.base.dev; + uint32_t flags = MSM_BO_WC; + u64 range_start, range_end; + int ret; - bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL); - if (!bo->pages) { - kfree(bo); - return ERR_PTR(-ENOMEM); + size = PAGE_ALIGN(size); + if (!iova) { + /* no fixed address - use GMU's uncached range */ + range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */ + range_end = 0x80000000; + } else { + /* range for fixed address */ + range_start = iova; + range_end = iova + size; + /* use IOMMU_PRIV for icache/dcache */ + flags |= MSM_BO_MAP_PRIV; } - for (i = 0; i < count; i++) { - bo->pages[i] = alloc_page(GFP_KERNEL); - if (!bo->pages[i]) - goto err; + bo->obj = msm_gem_new(dev, size, flags); + if (IS_ERR(bo->obj)) + return PTR_ERR(bo->obj); + + ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova, + range_start, range_end); + if (ret) { + drm_gem_object_put(bo->obj); + return ret; } - bo->iova = gmu->uncached_iova_base; + bo->virt = msm_gem_get_vaddr(bo->obj); + bo->size = size; - for (i = 0; i < count; i++) { - ret = iommu_map(gmu->domain, - bo->iova + (PAGE_SIZE * i), - page_to_phys(bo->pages[i]), PAGE_SIZE, - IOMMU_READ | IOMMU_WRITE); + msm_gem_object_set_name(bo->obj, "%s", name); + + return 0; +} - if (ret) { - DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n"); +static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu) +{ + struct msm_mmu *mmu; - for (i = i - 1 ; i >= 0; i--) - iommu_unmap(gmu->domain, - bo->iova + (PAGE_SIZE * i), - PAGE_SIZE); + mmu = msm_iommu_new(gmu->dev, 0); + if (IS_ERR(mmu)) + return PTR_ERR(mmu); - goto err; - } - } + gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true); + if (IS_ERR(gmu->vm)) + return PTR_ERR(gmu->vm); - bo->virt = vmap(bo->pages, count, VM_IOREMAP, - pgprot_writecombine(PAGE_KERNEL)); - if (!bo->virt) - goto err; + return 0; +} - /* Align future IOVA addresses on 1MB boundaries */ - gmu->uncached_iova_base += ALIGN(size, SZ_1M); +/** + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM) + * @unit: divisor used to convert bytes/sec bw value to an RPMh msg + * @width: multiplier used to convert bytes/sec bw value to an RPMh msg + * @vcd: virtual clock domain that this bcm belongs to + * @reserved: reserved field + */ +struct 
bcm_db { + __le32 unit; + __le16 width; + u8 vcd; + u8 reserved; +}; + +static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu, + const struct a6xx_info *info, + struct a6xx_gmu *gmu) +{ + const struct bcm_db *bcm_data[GMU_MAX_BCMS] = { 0 }; + unsigned int bcm_index, bw_index, bcm_count = 0; - return bo; + /* Retrieve BCM data from cmd-db */ + for (bcm_index = 0; bcm_index < GMU_MAX_BCMS; bcm_index++) { + const struct a6xx_bcm *bcm = &info->bcms[bcm_index]; + size_t count; -err: - for (i = 0; i < count; i++) { - if (bo->pages[i]) - __free_pages(bo->pages[i], 0); + /* Stop at NULL terminated bcm entry */ + if (!bcm->name) + break; + + bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count); + if (IS_ERR(bcm_data[bcm_index])) + return PTR_ERR(bcm_data[bcm_index]); + + if (!count) { + dev_err(gmu->dev, "invalid BCM '%s' aux data size\n", + bcm->name); + return -EINVAL; + } + + bcm_count++; } - kfree(bo->pages); - kfree(bo); + /* Generate BCM votes values for each bandwidth & BCM */ + for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) { + u32 *data = gmu->gpu_ib_votes[bw_index]; + u32 bw = gmu->gpu_bw_table[bw_index]; + + /* Calculations loosely copied from bcm_aggregate() & tcs_cmd_gen() */ + for (bcm_index = 0; bcm_index < bcm_count; bcm_index++) { + const struct a6xx_bcm *bcm = &info->bcms[bcm_index]; + bool commit = false; + u64 peak; + u32 vote; + + if (bcm_index == bcm_count - 1 || + (bcm_data[bcm_index + 1] && + bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd)) + commit = true; + + if (!bw) { + data[bcm_index] = BCM_TCS_CMD(commit, false, 0, 0); + continue; + } - return ERR_PTR(-ENOMEM); -} + if (bcm->fixed) { + u32 perfmode = 0; -static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) -{ - int ret; + /* GMU on A6xx votes perfmode on all valid bandwidth */ + if (!adreno_is_a7xx(adreno_gpu) || + (bcm->perfmode_bw && bw >= bcm->perfmode_bw)) + perfmode = bcm->perfmode; - /* - * The GMU address space is hardcoded to treat the range - * 0x60000000 - 0x80000000 as un-cached memory. 
All buffers shared - * between the GMU and the CPU will live in this space - */ - gmu->uncached_iova_base = 0x60000000; + data[bcm_index] = BCM_TCS_CMD(commit, true, 0, perfmode); + continue; + } + /* Multiply the bandwidth by the width of the connection */ + peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width); + do_div(peak, bcm->buswidth); - gmu->domain = iommu_domain_alloc(&platform_bus_type); - if (!gmu->domain) - return -ENODEV; + /* Input bandwidth value is in KBps, scale the value to BCM unit */ + peak *= 1000; + do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit)); - ret = iommu_attach_device(gmu->domain, gmu->dev); + vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK); - if (ret) { - iommu_domain_free(gmu->domain); - gmu->domain = NULL; + /* GMUs on A7xx votes on both x & y */ + if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) + data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote); + else + data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote); + } } - return ret; + return 0; } /* Return the 'arc-level' for the given frequency */ -static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) +static unsigned int a6xx_gmu_get_arc_level(struct device *dev, + unsigned long freq) { struct dev_pm_opp *opp; - struct device_node *np; - u32 val = 0; + unsigned int val; if (!freq) return 0; - opp = dev_pm_opp_find_freq_exact(dev, freq, true); + opp = dev_pm_opp_find_freq_exact(dev, freq, true); if (IS_ERR(opp)) return 0; - np = dev_pm_opp_get_of_node(opp); - - if (np) { - of_property_read_u32(np, "qcom,level", &val); - of_node_put(np); - } + val = dev_pm_opp_get_level(opp); dev_pm_opp_put(opp); @@ -954,13 +1564,14 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) } static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, - unsigned long *freqs, int freqs_count, const char *id) + unsigned long *freqs, int freqs_count, + const char *pri_id, const char *sec_id) { int i, j; const u16 *pri, *sec; size_t pri_count, sec_count; - pri = cmd_db_read_aux_data(id, &pri_count); + pri = cmd_db_read_aux_data(pri_id, &pri_count); if (IS_ERR(pri)) return PTR_ERR(pri); /* @@ -971,7 +1582,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, if (!pri_count) return -EINVAL; - sec = cmd_db_read_aux_data("mx.lvl", &sec_count); + sec = cmd_db_read_aux_data(sec_id, &sec_count); if (IS_ERR(sec)) return PTR_ERR(sec); @@ -982,7 +1593,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, /* Construct a vote for each frequency */ for (i = 0; i < freqs_count; i++) { u8 pindex = 0, sindex = 0; - u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]); + unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); /* Get the primary index that matches the arc level */ for (j = 0; j < pri_count; j++) { @@ -994,8 +1605,8 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, if (j == pri_count) { DRM_DEV_ERROR(dev, - "Level %u not found in in the RPMh list\n", - level); + "Level %u not found in the RPMh list\n", + level); DRM_DEV_ERROR(dev, "Available levels:\n"); for (j = 0; j < pri_count; j++) DRM_DEV_ERROR(dev, " %u\n", pri[j]); @@ -1025,26 +1636,96 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, return 0; } +static int a6xx_gmu_rpmh_dep_votes_init(struct device *dev, u32 *votes, + unsigned long *freqs, int freqs_count) +{ + const u16 *mx; + size_t count; + + mx = cmd_db_read_aux_data("mx.lvl", &count); + if (IS_ERR(mx)) + return PTR_ERR(mx); + /* + * The data comes 
back as an array of unsigned shorts so adjust the + * count accordingly + */ + count >>= 1; + if (!count) + return -EINVAL; + + /* Fix the vote for zero frequency */ + votes[0] = 0xffffffff; + + /* Construct a vote for rest of the corners */ + for (int i = 1; i < freqs_count; i++) { + unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); + u8 j, index = 0; + + /* Get the primary index that matches the arc level */ + for (j = 0; j < count; j++) { + if (mx[j] >= level) { + index = j; + break; + } + } + + if (j == count) { + DRM_DEV_ERROR(dev, + "Mx Level %u not found in the RPMh list\n", + level); + DRM_DEV_ERROR(dev, "Available levels:\n"); + for (j = 0; j < count; j++) + DRM_DEV_ERROR(dev, " %u\n", mx[j]); + + return -EINVAL; + } + + /* Construct the vote */ + votes[i] = (0x3fff << 14) | (index << 8) | (0xff); + } + + return 0; +} + /* * The GMU votes with the RPMh for itself and on behalf of the GPU but we need * to construct the list of votes on the CPU and send it over. Query the RPMh * voltage levels and build the votes + * The GMU can also vote for DDR interconnects, use the OPP bandwidth entries + * and BCM parameters to build the votes. */ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + const struct a6xx_info *info = adreno_gpu->info->a6xx; struct msm_gpu *gpu = &adreno_gpu->base; + const char *sec_id; + const u16 *gmxc; int ret; + gmxc = cmd_db_read_aux_data("gmxc.lvl", NULL); + if (gmxc == ERR_PTR(-EPROBE_DEFER)) + return -EPROBE_DEFER; + + /* If GMxC is present, prefer that as secondary rail for GX votes */ + sec_id = IS_ERR_OR_NULL(gmxc) ? "mx.lvl" : "gmxc.lvl"; + /* Build the GX votes */ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, - gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); + gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl", sec_id); /* Build the CX votes */ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, - gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); + gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl", "mx.lvl"); + + ret |= a6xx_gmu_rpmh_dep_votes_init(gmu->dev, gmu->dep_arc_votes, + gmu->gpu_freqs, gmu->nr_gpu_freqs); + + /* Build the interconnect votes */ + if (info->bcms && gmu->nr_gpu_bws > 1) + ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu); return ret; } @@ -1081,10 +1762,43 @@ static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs, return index; } +static int a6xx_gmu_build_bw_table(struct device *dev, unsigned long *bandwidths, + u32 size) +{ + int count = dev_pm_opp_get_opp_count(dev); + struct dev_pm_opp *opp; + int i, index = 0; + unsigned int bandwidth = 1; + + /* + * The OPP table doesn't contain the "off" bandwidth level so we need to + * add 1 to the table size to account for it + */ + + if (WARN(count + 1 > size, + "The GMU bandwidth table is being truncated\n")) + count = size - 1; + + /* Set the "off" bandwidth */ + bandwidths[index++] = 0; + + for (i = 0; i < count; i++) { + opp = dev_pm_opp_find_bw_ceil(dev, &bandwidth, 0); + if (IS_ERR(opp)) + break; + + dev_pm_opp_put(opp); + bandwidths[index++] = bandwidth++; + } + + return index; +} + static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + const struct a6xx_info *info = adreno_gpu->info->a6xx; struct msm_gpu *gpu = &adreno_gpu->base; int ret = 0; @@ -1093,7 +1807,7 @@ 
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) * The GMU handles its own frequency switching so build a list of * available frequencies to send during initialization */ - ret = dev_pm_opp_of_add_table(gmu->dev); + ret = devm_pm_opp_of_add_table(gmu->dev); if (ret) { DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); return ret; @@ -1109,13 +1823,93 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); + gmu->current_perf_index = gmu->nr_gpu_freqs - 1; + + /* + * The GMU also handles GPU Interconnect Votes so build a list + * of DDR bandwidths from the GPU OPP table + */ + if (info->bcms) + gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev, + gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table)); + /* Build the list of RPMh votes that we'll send to the GMU */ return a6xx_gmu_rpmh_votes_init(gmu); } +static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct a6xx_hfi_acd_table *cmd = &gmu->acd_table; + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + int ret, i, cmd_idx = 0; + extern bool disable_acd; + + /* Skip ACD probe if requested via module param */ + if (disable_acd) { + DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n"); + return 0; + } + + cmd->version = 1; + cmd->stride = 1; + cmd->enable_by_level = 0; + + /* Skip freq = 0 and parse acd-level for rest of the OPPs */ + for (i = 1; i < gmu->nr_gpu_freqs; i++) { + struct dev_pm_opp *opp; + struct device_node *np; + unsigned long freq; + u32 val; + + freq = gmu->gpu_freqs[i]; + /* This is unlikely to fail because we are passing back a known freq */ + opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true); + np = dev_pm_opp_get_of_node(opp); + + ret = of_property_read_u32(np, "qcom,opp-acd-level", &val); + of_node_put(np); + dev_pm_opp_put(opp); + if (ret == -EINVAL) + continue; + else if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq); + return ret; + } + + cmd->enable_by_level |= BIT(i); + cmd->data[cmd_idx++] = val; + } + + cmd->num_levels = cmd_idx; + + /* It is a problem if qmp node is unavailable when ACD is required */ + if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) { + DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n"); + return -EINVAL; + } + + /* Otherwise, nothing to do if qmp is unavailable */ + if (IS_ERR_OR_NULL(gmu->qmp)) + return 0; + + /* + * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is disabled on + * system reset. 
So it is harmless if we couldn't notify 'OFF' state + */ + ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level); + if (ret && cmd->enable_by_level) { + DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n"); + return ret; + } + + return 0; +} + static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) { - int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks); + int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); if (ret < 1) return ret; @@ -1125,11 +1919,118 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, gmu->nr_clocks, "gmu"); + gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, + gmu->nr_clocks, "hub"); + + return 0; +} + +static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, + const char *name, irq_handler_t handler) +{ + int irq, ret; + + irq = platform_get_irq_byname(pdev, name); + + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", + name, ret); + return ret; + } + + return irq; +} + +void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + unsigned int sysprof_active; + + /* Nothing to do if GPU is suspended. We will handle this during GMU resume */ + if (!pm_runtime_get_if_active(&gpu->pdev->dev)) + return; + + mutex_lock(&gmu->lock); + + sysprof_active = refcount_read(&gpu->sysprof_active); + + /* + * 'Perfcounter select' register values are lost during IFPC collapse. To avoid that, + * use the currently unused perfcounter oob vote to block IFPC when sysprof is active + */ + if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) + a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) + a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + + mutex_unlock(&gmu->lock); + + pm_runtime_put(&gpu->pdev->dev); +} + +void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + + mutex_lock(&gmu->lock); + if (!gmu->initialized) { + mutex_unlock(&gmu->lock); + return; + } + + gmu->initialized = false; + + mutex_unlock(&gmu->lock); + + pm_runtime_force_suspend(gmu->dev); + + /* + * Since cxpd is a virt device, the devlink with gmu-dev will be removed + * automatically when we do detach + */ + dev_pm_domain_detach(gmu->cxpd, false); + + if (!IS_ERR_OR_NULL(gmu->gxpd)) { + pm_runtime_disable(gmu->gxpd); + dev_pm_domain_detach(gmu->gxpd, false); + } + + if (!IS_ERR_OR_NULL(gmu->qmp)) + qmp_put(gmu->qmp); + + iounmap(gmu->mmio); + gmu->mmio = NULL; + gmu->rscc = NULL; + + if (!adreno_has_gmu_wrapper(adreno_gpu) && + !adreno_has_rgmu(adreno_gpu)) { + a6xx_gmu_memory_free(gmu); + + free_irq(gmu->gmu_irq, gmu); + free_irq(gmu->hfi_irq, gmu); + } + + /* Drop reference taken in of_find_device_by_node */ + put_device(gmu->dev); +} + +static int cxpd_notifier_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb); + + if (action == GENPD_NOTIFY_OFF) + complete_all(&gmu->pd_gate); + return 0; } static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, - const char *name) + const char *name, resource_size_t *start) { void __iomem *ret; struct 
resource *res = platform_get_resource_byname(pdev, @@ -1140,56 +2041,113 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, return ERR_PTR(-EINVAL); } - ret = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + ret = ioremap(res->start, resource_size(res)); if (!ret) { DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); return ERR_PTR(-EINVAL); } + if (start) + *start = res->start; + return ret; } -static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, - const char *name, irq_handler_t handler) +int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { - int irq, ret; + struct platform_device *pdev = of_find_device_by_node(node); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + resource_size_t start; + struct resource *res; + int ret; - irq = platform_get_irq_byname(pdev, name); + if (!pdev) + return -ENODEV; - ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH, - name, gmu); - if (ret) { - DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name); + gmu->dev = &pdev->dev; + + ret = of_dma_configure(gmu->dev, node, true); + if (ret) return ret; + + pm_runtime_enable(gmu->dev); + + /* Mark legacy for manual SPTPRAC control */ + gmu->legacy = true; + + /* RGMU requires clocks */ + ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); + if (ret < 0) + goto err_clk; + + gmu->nr_clocks = ret; + + /* Map the GMU registers */ + gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start); + if (IS_ERR(gmu->mmio)) { + ret = PTR_ERR(gmu->mmio); + goto err_mmio; } - disable_irq(irq); + res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory"); + if (!res) { + ret = -EINVAL; + goto err_mmio; + } - return irq; -} + /* Identify gmu base offset from gpu base address */ + gmu->mmio_offset = (u32)(start - res->start); -void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) -{ - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); + if (IS_ERR(gmu->cxpd)) { + ret = PTR_ERR(gmu->cxpd); + goto err_mmio; + } - if (IS_ERR_OR_NULL(gmu->mmio)) - return; + if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { + ret = -ENODEV; + goto detach_cxpd; + } - pm_runtime_disable(gmu->dev); - a6xx_gmu_stop(a6xx_gpu); + init_completion(&gmu->pd_gate); + complete_all(&gmu->pd_gate); + gmu->pd_nb.notifier_call = cxpd_notifier_cb; - a6xx_gmu_irq_disable(gmu); - a6xx_gmu_memory_free(gmu, gmu->hfi); + /* Get a link to the GX power domain to reset the GPU */ + gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); + if (IS_ERR(gmu->gxpd)) { + ret = PTR_ERR(gmu->gxpd); + goto err_mmio; + } + + gmu->initialized = true; - iommu_detach_device(gmu->domain, gmu->dev); + return 0; + +detach_cxpd: + dev_pm_domain_detach(gmu->cxpd, false); + +err_mmio: + iounmap(gmu->mmio); - iommu_domain_free(gmu->domain); +err_clk: + /* Drop reference taken in of_find_device_by_node */ + put_device(gmu->dev); + + return ret; } -int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) +int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; struct platform_device *pdev = of_find_device_by_node(node); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + struct device_link *link; + resource_size_t start; + struct resource *res; int 
ret; if (!pdev) @@ -1197,61 +2155,200 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->dev = &pdev->dev; - of_dma_configure(gmu->dev, node, true); + ret = of_dma_configure(gmu->dev, node, true); + if (ret) + return ret; - /* Fow now, don't do anything fancy until we get our feet under us */ - gmu->idle_level = GMU_IDLE_STATE_ACTIVE; + /* Set GMU idle level */ + gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ? + GMU_IDLE_STATE_IFPC : GMU_IDLE_STATE_ACTIVE; pm_runtime_enable(gmu->dev); - gmu->gx = devm_regulator_get(gmu->dev, "vdd"); /* Get the list of clocks */ ret = a6xx_gmu_clocks_probe(gmu); if (ret) - return ret; + goto err_put_device; - /* Set up the IOMMU context bank */ - ret = a6xx_gmu_memory_probe(gmu); + ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu); if (ret) - return ret; + goto err_put_device; - /* Allocate memory for for the HFI queues */ - gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); - if (IS_ERR(gmu->hfi)) - goto err; - /* Allocate memory for the GMU debug region */ - gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K); - if (IS_ERR(gmu->debug)) - goto err; + /* A660 now requires handling "prealloc requests" in GMU firmware + * For now just hardcode allocations based on the known firmware. + * note: there is no indication that these correspond to "dummy" or + * "debug" regions, but this "guess" allows reusing these BOs which + * are otherwise unused by a660. + */ + gmu->dummy.size = SZ_4K; + if (adreno_is_a660_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu) || + adreno_is_a8xx(adreno_gpu)) { + ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, + 0x60400000, "debug"); + if (ret) + goto err_memory; + + gmu->dummy.size = SZ_16K; + } + + /* Allocate memory for the GMU dummy page */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, + 0x60000000, "dummy"); + if (ret) + goto err_memory; + + /* Note that a650 family also includes a660 family: */ + if (adreno_is_a650_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu) || + adreno_is_a8xx(adreno_gpu)) { + ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, + SZ_16M - SZ_16K, 0x04000, "icache"); + if (ret) + goto err_memory; + /* + * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition + * to allocate icache/dcache here, as per downstream code flow, but it may not actually be + * necessary. If you omit this step and you don't get random pagefaults, you are likely + * good to go without this! 
+ */ + } else if (adreno_is_a640_family(adreno_gpu)) { + ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, + SZ_256K - SZ_16K, 0x04000, "icache"); + if (ret) + goto err_memory; + + ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, + SZ_256K - SZ_16K, 0x44000, "dcache"); + if (ret) + goto err_memory; + } else if (adreno_is_a630_family(adreno_gpu)) { + /* HFI v1, has sptprac */ + gmu->legacy = true; + + /* Allocate memory for the GMU debug region */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); + if (ret) + goto err_memory; + } + + /* Allocate memory for the GMU log region */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); + if (ret) + goto err_memory; + + /* Allocate memory for for the HFI queues */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); + if (ret) + goto err_memory; /* Map the GMU registers */ - gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); - if (IS_ERR(gmu->mmio)) - goto err; + gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start); + if (IS_ERR(gmu->mmio)) { + ret = PTR_ERR(gmu->mmio); + goto err_memory; + } + + res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory"); + if (!res) { + ret = -EINVAL; + goto err_mmio; + } + + /* Identify gmu base offset from gpu base address */ + gmu->mmio_offset = (u32)(start - res->start); + + if (adreno_is_a650_family(adreno_gpu) || + adreno_is_a7xx(adreno_gpu)) { + gmu->rscc = devm_platform_ioremap_resource_byname(pdev, "rscc"); + if (IS_ERR(gmu->rscc)) { + ret = -ENODEV; + goto err_mmio; + } + } else if (adreno_is_a8xx(adreno_gpu)) { + gmu->rscc = gmu->mmio + 0x19000; + } else { + gmu->rscc = gmu->mmio + 0x23000; + } /* Get the HFI and GMU interrupts */ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); - if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) - goto err; + if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { + ret = -ENODEV; + goto err_mmio; + } + + gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); + if (IS_ERR(gmu->cxpd)) { + ret = PTR_ERR(gmu->cxpd); + goto err_mmio; + } + + link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); + if (!link) { + ret = -ENODEV; + goto detach_cxpd; + } + + /* Other errors are handled during GPU ACD probe */ + gmu->qmp = qmp_get(gmu->dev); + if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto detach_gxpd; + } + + init_completion(&gmu->pd_gate); + complete_all(&gmu->pd_gate); + gmu->pd_nb.notifier_call = cxpd_notifier_cb; + + /* + * Get a link to the GX power domain to reset the GPU in case of GMU + * crash + */ + gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); /* Get the power levels for the GMU and GPU */ a6xx_gmu_pwrlevels_probe(gmu); + ret = a6xx_gmu_acd_probe(gmu); + if (ret) + goto detach_gxpd; + /* Set up the HFI queues */ a6xx_hfi_init(gmu); + /* Initialize RPMh */ + a6xx_gmu_rpmh_init(gmu); + + gmu->initialized = true; + return 0; -err: - a6xx_gmu_memory_free(gmu, gmu->hfi); - if (gmu->domain) { - iommu_detach_device(gmu->domain, gmu->dev); +detach_gxpd: + if (!IS_ERR_OR_NULL(gmu->gxpd)) + dev_pm_domain_detach(gmu->gxpd, false); - iommu_domain_free(gmu->domain); - } + if (!IS_ERR_OR_NULL(gmu->qmp)) + qmp_put(gmu->qmp); + + device_link_del(link); - return -ENODEV; +detach_cxpd: + dev_pm_domain_detach(gmu->cxpd, false); + +err_mmio: + iounmap(gmu->mmio); + free_irq(gmu->gmu_irq, gmu); + free_irq(gmu->hfi_irq, gmu); + +err_memory: + a6xx_gmu_memory_free(gmu); +err_put_device: + /* Drop reference taken in 
of_find_device_by_node */ + put_device(gmu->dev); + + return ret; } |
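
Editor's note on the new interconnect votes: a6xx_gmu_rpmh_bw_votes_init() above scales each OPP bandwidth by the BCM width from cmd-db, divides by the path buswidth, converts the KBps value to the BCM unit, clamps it to the 14-bit vote field and packs it with BCM_TCS_CMD(). The standalone C sketch below only models that arithmetic; bcm_tcs_cmd() assumes the usual layout from soc/qcom/tcs.h (commit bit 30, valid bit 29, two 14-bit vote fields) and every input number is made up for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VOTE_MASK 0x3fffu

/* Simplified stand-in for BCM_TCS_CMD(): commit bit, valid bit, x/y votes */
static uint32_t bcm_tcs_cmd(int commit, int valid, uint32_t vote_x, uint32_t vote_y)
{
	return ((uint32_t)!!commit << 30) | ((uint32_t)!!valid << 29) |
	       ((vote_x & VOTE_MASK) << 14) | (vote_y & VOTE_MASK);
}

/*
 * bw:        requested bandwidth in KBps (from the GPU OPP table)
 * bcm_width: channel width reported by cmd-db for this BCM node
 * buswidth:  width of the interconnect path from the device tree
 * unit:      BCM scaling unit reported by cmd-db
 */
static uint32_t bcm_vote(uint64_t bw, uint16_t bcm_width, uint16_t buswidth, uint32_t unit)
{
	/* Multiply the bandwidth by the width of the connection */
	uint64_t peak = bw * bcm_width / buswidth;

	/* Input value is in KBps, scale it to the BCM unit */
	peak = peak * 1000 / unit;

	/* Clamp to the 14-bit vote field, never voting 0 for a non-zero bw */
	if (peak < 1)
		peak = 1;
	if (peak > VOTE_MASK)
		peak = VOTE_MASK;

	return (uint32_t)peak;
}

int main(void)
{
	/* Hypothetical numbers, only to show the shape of the result */
	uint32_t vote = bcm_vote(7216000, 4, 4, 800);

	/* A7xx and newer vote on both x and y; older parts only on y */
	printf("a7xx cmd: 0x%08" PRIx32 "\n", bcm_tcs_cmd(1, 1, vote, vote));
	printf("a6xx cmd: 0x%08" PRIx32 "\n", bcm_tcs_cmd(1, 1, 0, vote));
	return 0;
}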
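
The new a6xx_gmu_rpmh_dep_votes_init() walks the mx.lvl table from cmd-db, picks the first corner at or above each OPP's arc level and packs (0x3fff << 14) | (index << 8) | 0xff, with a fixed 0xffffffff vote for the zero frequency. A standalone sketch of that lookup and packing; the mx corners and arc levels below are hypothetical stand-ins for the cmd-db and OPP data.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* First mx.lvl corner at or above the requested arc level, -1 if none */
static int mx_index_for_level(const uint16_t *mx, int count, unsigned int level)
{
	for (int j = 0; j < count; j++) {
		if (mx[j] >= level)
			return j;
	}
	return -1;
}

int main(void)
{
	/* Hypothetical mx.lvl corners as returned by cmd-db, ascending */
	const uint16_t mx[] = { 0, 48, 64, 128, 192, 256 };
	/* Hypothetical arc levels for each GPU OPP, index 0 is freq == 0 */
	const unsigned int arc_level[] = { 0, 64, 128, 192 };
	uint32_t votes[4];

	votes[0] = 0xffffffff;	/* fixed vote for the zero frequency */

	for (int i = 1; i < 4; i++) {
		int idx = mx_index_for_level(mx, 6, arc_level[i]);

		if (idx < 0)
			return 1;	/* level missing from the RPMh list */

		/* Same packing as the new code */
		votes[i] = (0x3fffu << 14) | ((uint32_t)idx << 8) | 0xff;
		printf("votes[%d] = 0x%08" PRIx32 "\n", i, votes[i]);
	}

	return 0;
}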
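
a6xx_gmu_build_bw_table() prepends an explicit "off" level and then walks the GPU OPP table with dev_pm_opp_find_bw_ceil(), bumping the search cursor past each hit so every iteration lands on the next higher peak bandwidth. The sketch below models that walk in plain C; bw_ceil() is only a stand-in for the OPP helper and the opp_bw[] values are hypothetical.

#include <stdio.h>

#define MAX_BWS 8

/* Stand-in for dev_pm_opp_find_bw_ceil(): smallest entry >= *bw, 0 on hit */
static int bw_ceil(const unsigned long *tbl, int n, unsigned long *bw)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i] >= *bw) {
			*bw = tbl[i];
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	/* Hypothetical peak bandwidths in KBps, ascending as in an OPP table */
	const unsigned long opp_bw[] = { 2188000, 5412000, 7216000, 8532000 };
	unsigned long table[MAX_BWS];
	unsigned long bandwidth = 1;
	int index = 0;

	/* Set the "off" bandwidth level that the OPP table does not carry */
	table[index++] = 0;

	for (int i = 0; i < 4 && index < MAX_BWS; i++) {
		if (bw_ceil(opp_bw, 4, &bandwidth))
			break;

		/* Record the hit, then move past it for the next search */
		table[index++] = bandwidth++;
	}

	for (int i = 0; i < index; i++)
		printf("bw[%d] = %lu KBps\n", i, table[i]);

	return 0;
}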
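
Finally, a6xx_gmu_acd_probe() skips the zero frequency, reads qcom,opp-acd-level for each remaining OPP, sets BIT(i) in enable_by_level for the OPPs that carry the property and packs the property values densely into the data[] array sent over HFI. A standalone sketch of that table construction; acd_level[] stands in for the OPP/devicetree lookup, its values are made up, and treating 0 as "property absent" is purely an illustration convention.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NR_FREQS 5

int main(void)
{
	/*
	 * Stand-in for the per-OPP qcom,opp-acd-level lookup; index 0 is the
	 * zero frequency and 0 means the property is absent for that OPP.
	 */
	const uint32_t acd_level[NR_FREQS] = { 0, 0, 0xc0045ffd, 0xc0045ffd, 0xc0085ffd };
	uint32_t enable_by_level = 0;
	uint32_t data[NR_FREQS];
	int cmd_idx = 0;

	/* Skip freq == 0 and collect the levels for the remaining OPPs */
	for (int i = 1; i < NR_FREQS; i++) {
		if (!acd_level[i])
			continue;

		enable_by_level |= 1u << i;
		data[cmd_idx++] = acd_level[i];
	}

	printf("enable_by_level = 0x%" PRIx32 ", num_levels = %d\n",
	       enable_by_level, cmd_idx);

	for (int i = 0; i < cmd_idx; i++)
		printf("data[%d] = 0x%" PRIx32 "\n", i, data[i]);

	return 0;
}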
