Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 128
1 file changed, 101 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c8711938a5f4..28e6705c6da6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1064,14 +1064,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
- /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
- if (!IS_ERR(gmu->qmp)) {
- ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
- 0 /* Hardcode ACD to be disabled for now */);
- if (ret)
- dev_err(gmu->dev, "failed to send GPU ACD state\n");
- }
-
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
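[Editor's note: the hardcoded "disable ACD" message is lifted out of the resume path; the notification now happens once, from the new a6xx_gmu_acd_probe() further down, with the real on/off state. For reference, a minimal kernel-context sketch of that AOSS handshake, using the printf-style qmp_send() from <linux/soc/qcom/qcom_aoss.h>; the helper name is hypothetical, not part of the driver:]

/* Hypothetical helper: send the GPU ACD on/off state to AOSS over the
 * QMP mailbox, mirroring the message the old resume path sent. */
static int notify_acd_state(struct qmp *qmp, struct device *dev, bool enable)
{
	int ret;

	if (IS_ERR_OR_NULL(qmp))
		return 0;	/* no AOSS mailbox on this platform */

	ret = qmp_send(qmp, "{class: gpu, res: acd, val: %d}", enable);
	if (ret)
		dev_err(dev, "failed to send GPU ACD state\n");

	return ret;
}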
@@ -1267,15 +1259,17 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
- msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
-
- gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
- msm_gem_address_space_put(gmu->aspace);
+ struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
+ msm_gem_kernel_put(gmu->log.obj, gmu->vm);
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gmu->vm);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
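[Editor's note: the ordering in the new a6xx_gmu_memory_free() above matters. The msm_mmu pointer lives inside the msm_vm wrapper around the drm_gpuvm, and the final drm_gpuvm_put() may free that wrapper. A sketch of the constraint, assuming to_msm_vm() remains a plain container accessor; the function name is illustrative only, the driver does this inline:]

static void example_vm_teardown(struct drm_gpuvm *vm)
{
	/* Grab the MMU before dropping the last VM reference ... */
	struct msm_mmu *mmu = to_msm_vm(vm)->mmu;

	mmu->funcs->detach(mmu);	/* stop translations first */
	drm_gpuvm_put(vm);		/* ... since this may free the wrapper */
}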
@@ -1304,7 +1298,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
if (IS_ERR(bo->obj))
return PTR_ERR(bo->obj);
- ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
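[Editor's note: only the address-space handle changes type in this hunk; the allocate-then-pin flow is otherwise untouched. A kernel-context sketch of that flow under the new handle, assuming msm_gem_new() and msm_gem_get_and_pin_iova_range() keep their current msm semantics; the function name is hypothetical:]

static int example_gmu_bo_alloc(struct drm_device *drm, struct a6xx_gmu *gmu,
				struct a6xx_gmu_bo *bo, size_t size,
				u64 range_start, u64 range_end)
{
	int ret;

	bo->obj = msm_gem_new(drm, size, MSM_BO_WC);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	/* Pin a GMU VA for the BO inside the window the caller allows */
	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova,
					     range_start, range_end);
	if (ret)
		drm_gem_object_put(bo->obj);

	return ret;
}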
@@ -1319,7 +1313,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
return 0;
}
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
{
struct msm_mmu *mmu;
@@ -1329,9 +1323,9 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
if (IS_ERR(mmu))
return PTR_ERR(mmu);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
- if (IS_ERR(gmu->aspace))
- return PTR_ERR(gmu->aspace);
+ gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
+ if (IS_ERR(gmu->vm))
+ return PTR_ERR(gmu->vm);
return 0;
}
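[Editor's note: a6xx_gmu_memory_probe() gains a drm_device argument because drm_gpuvm objects are owned by a DRM device, unlike the old msm-private address spaces. A sketch of the create side that pairs with the teardown above, assuming the elided body still gets its MMU from msm_iommu_new() and that the trailing 'true' marks the VM as kernel-managed:]

static int example_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
{
	struct msm_mmu *mmu = msm_iommu_new(gmu->dev, 0);

	if (IS_ERR(mmu))
		return PTR_ERR(mmu);

	/* Same [0, 2 GiB) GMU VA window as before, now as a drm_gpuvm */
	gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true);
	return PTR_ERR_OR_ZERO(gmu->vm);
}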
@@ -1671,6 +1665,75 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
return a6xx_gmu_rpmh_votes_init(gmu);
}
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret, i, cmd_idx = 0;
+ extern bool disable_acd;
+
+ /* Skip ACD probe if requested via module param */
+ if (disable_acd) {
+ DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
+ return 0;
+ }
+
+ cmd->version = 1;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ /* Skip freq = 0 and parse acd-level for rest of the OPPs */
+ for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long freq;
+ u32 val;
+
+ freq = gmu->gpu_freqs[i];
+ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+ np = dev_pm_opp_get_of_node(opp);
+
+ ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+ of_node_put(np);
+ dev_pm_opp_put(opp);
+ if (ret == -EINVAL)
+ continue;
+ else if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
+ return ret;
+ }
+
+ cmd->enable_by_level |= BIT(i);
+ cmd->data[cmd_idx++] = val;
+ }
+
+ cmd->num_levels = cmd_idx;
+
+ /* It is a problem if qmp node is unavailable when ACD is required */
+ if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
+ return -EINVAL;
+ }
+
+ /* Otherwise, nothing to do if qmp is unavailable */
+ if (IS_ERR_OR_NULL(gmu->qmp))
+ return 0;
+
+ /*
+ * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is disabled on
+ * system reset. So it is harmless if we couldn't notify 'OFF' state
+ */
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
+ if (ret && cmd->enable_by_level) {
+ DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
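[Editor's note: the ACD table built in a6xx_gmu_acd_probe() is a dense array indexed through a sparse bitmask: bit i of enable_by_level records whether power level i contributed an entry to data[]. A standalone, runnable worked example of just that bookkeeping; the frequencies and ACD values are invented for illustration:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* -1 stands in for an OPP without "qcom,opp-acd-level" */
	const int acd_level[] = { -1, -1, 0xa02, 0xc03 };
	uint32_t enable_by_level = 0, data[4];
	int cmd_idx = 0;

	for (int i = 1; i < 4; i++) {	/* level 0 (freq == 0) is skipped */
		if (acd_level[i] < 0)
			continue;	/* the -EINVAL case: no ACD here */
		enable_by_level |= 1u << i;
		data[cmd_idx++] = (uint32_t)acd_level[i];
	}

	/* Prints: enable_by_level=0xc num_levels=2 data={0xa02, 0xc03} */
	printf("enable_by_level=%#x num_levels=%d data={%#x, %#x}\n",
	       enable_by_level, cmd_idx, data[0], data[1]);
	return 0;
}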
@@ -1879,7 +1942,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- ret = a6xx_gmu_memory_probe(gmu);
+ ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu);
if (ret)
goto err_put_device;
@@ -1989,10 +2052,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto detach_cxpd;
}
+ /* Other errors are handled during GPU ACD probe */
gmu->qmp = qmp_get(gmu->dev);
- if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
- ret = PTR_ERR(gmu->qmp);
- goto remove_device_link;
+ if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto detach_gxpd;
}
init_completion(&gmu->pd_gate);
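[Editor's note: qmp_get() can fail either because the GMU has no AOSS mailbox (benign on many SoCs) or because AOSS has not probed yet; only the latter should abort the GPU probe. Any other ERR_PTR is parked in gmu->qmp for a6xx_gmu_acd_probe() to judge, where it is fatal only if ACD is actually required. A sketch of the pattern with a hypothetical wrapper name:]

static int example_get_qmp(struct device *dev, struct qmp **out)
{
	struct qmp *qmp = qmp_get(dev);

	/* PTR_ERR_OR_ZERO() yields the errno for an ERR_PTR, else 0 */
	if (PTR_ERR_OR_ZERO(qmp) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* AOSS not ready; retry probe later */

	*out = qmp;	/* may still be an ERR_PTR; checked at ACD probe */
	return 0;
}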
@@ -2008,6 +2072,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
+ ret = a6xx_gmu_acd_probe(gmu);
+ if (ret)
+ goto detach_gxpd;
+
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
@@ -2018,7 +2086,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
-remove_device_link:
+detach_gxpd:
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ dev_pm_domain_detach(gmu->gxpd, false);
+
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
device_link_del(link);
detach_cxpd: