Diffstat (limited to 'drivers/gpu/drm/panfrost')
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c  78
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h  13
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c      5
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_dump.c    12
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c     85
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c      4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_regs.h     1
7 files changed, 156 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 28f7046e1b1a..c90ad5ee34e7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -403,7 +403,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
panfrost_job_enable_interrupts(pfdev);
}
-static int panfrost_device_resume(struct device *dev)
+static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -413,7 +413,7 @@ static int panfrost_device_resume(struct device *dev)
return 0;
}
-static int panfrost_device_suspend(struct device *dev)
+static int panfrost_device_runtime_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -426,5 +426,75 @@ static int panfrost_device_suspend(struct device *dev)
return 0;
}
-EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend,
- panfrost_device_resume, NULL);
+static int panfrost_device_resume(struct device *dev)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) {
+ unsigned long freq = pfdev->pfdevfreq.fast_rate;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_set_opp(dev, opp);
+ dev_pm_opp_put(opp);
+ }
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto err_resume;
+
+ return 0;
+
+err_resume:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS) && pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF))
+ dev_pm_opp_set_opp(dev, NULL);
+ return ret;
+}
+
+static int panfrost_device_suspend(struct device *dev)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ }
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF))
+ dev_pm_opp_set_opp(dev, NULL);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(panfrost_pm_ops) = {
+ RUNTIME_PM_OPS(panfrost_device_runtime_suspend, panfrost_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(panfrost_device_suspend, panfrost_device_resume)
+};
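
For readers following how the new split between runtime and system-sleep callbacks is consumed, here is a minimal sketch of the platform-driver side. It only uses standard <linux/pm.h> machinery; the probe callback name and the struct variable name are illustrative, while panfrost_pm_ops is the symbol exported above.

#include <linux/platform_device.h>
#include <linux/pm.h>

/*
 * Minimal sketch, not part of this patch: a dev_pm_ops exported with
 * EXPORT_GPL_DEV_PM_OPS() is typically referenced from the driver's
 * platform_driver.  pm_ptr() drops the reference when CONFIG_PM is off.
 */
static struct platform_driver panfrost_driver_sketch = {
	.probe	= panfrost_probe,	/* assumed probe callback */
	.driver	= {
		.name	= "panfrost",
		.pm	= pm_ptr(&panfrost_pm_ops),
	},
};

With that wiring, the RUNTIME_PM_OPS() pair services runtime PM transitions, while the SYSTEM_SLEEP_PM_OPS() pair runs on system suspend/resume and funnels into the runtime path via pm_runtime_force_suspend()/pm_runtime_force_resume(), with the optional clock and regulator handling wrapped around those calls, as the new handlers above show.
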
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 1ef38f60d5dc..0fc558db6bfd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -25,6 +25,16 @@ struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
+/**
+ * enum panfrost_gpu_pm - Supported kernel power management features
+ * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
+ * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ */
+enum panfrost_gpu_pm {
+ GPU_PM_CLK_DIS,
+ GPU_PM_VREG_OFF,
+};
+
struct panfrost_features {
u16 id;
u16 revision;
@@ -75,6 +85,9 @@ struct panfrost_compatible {
/* Vendor implementation quirks callback */
void (*vendor_quirk)(struct panfrost_device *pfdev);
+
+ /* Allowed PM features */
+ u8 pm_features;
};
struct panfrost_device {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 7cabf4e3d1f2..a926d71e8131 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -274,7 +274,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- NULL);
+ 1, NULL);
if (ret)
goto out_put_job;
@@ -734,6 +734,7 @@ static const struct panfrost_compatible mediatek_mt8183_b_data = {
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
@@ -742,6 +743,7 @@ static const struct panfrost_compatible mediatek_mt8186_data = {
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
.pm_domain_names = mediatek_mt8186_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -752,6 +754,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.supply_names = mediatek_mt8192_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const struct of_device_id dt_match[] = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index e7942ac449c6..47751302f1bc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -220,16 +220,8 @@ void panfrost_core_dump(struct panfrost_job *job)
iter.hdr->bomap.data[0] = bomap - bomap_start;
- for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
- struct page *page = sg_page_iter_page(&page_iter);
-
- if (!IS_ERR(page)) {
- *bomap++ = page_to_phys(page);
- } else {
- dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
- *bomap++ = 0;
- }
- }
+ for_each_sgtable_page(bo->base.sgt, &page_iter, 0)
+ *bomap++ = page_to_phys(sg_page_iter_page(&page_iter));
iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f0be7e19b13e..09f5e1563ebd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -60,14 +60,21 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
gpu_write(pfdev, GPU_INT_MASK, 0);
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
- gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
- val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+ val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu soft reset timed out\n");
- return ret;
+ dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");
+
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
+ val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+ if (ret) {
+ dev_err(pfdev->dev, "gpu hard reset timed out\n");
+ return ret;
+ }
}
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
@@ -362,32 +369,42 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev)
return ((u64)hi << 32) | lo;
}
+static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
+{
+ u64 core_mask;
+
+ if (pfdev->features.l2_present == 1)
+ return U64_MAX;
+
+ /*
+ * Only support one core group now.
+ * ~(l2_present - 1) unsets all bits in l2_present except
+ * the bottom bit. (l2_present - 2) has all the bits in
+ * the first core group set. AND them together to generate
+ * a mask of cores in the first core group.
+ */
+ core_mask = ~(pfdev->features.l2_present - 1) &
+ (pfdev->features.l2_present - 2);
+ dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+ hweight64(core_mask),
+ hweight64(pfdev->features.shader_present));
+
+ return core_mask;
+}
+
void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
int ret;
u32 val;
- u64 core_mask = U64_MAX;
+ u64 core_mask;
panfrost_gpu_init_quirks(pfdev);
+ core_mask = panfrost_get_core_mask(pfdev);
- if (pfdev->features.l2_present != 1) {
- /*
- * Only support one core group now.
- * ~(l2_present - 1) unsets all bits in l2_present except
- * the bottom bit. (l2_present - 2) has all the bits in
- * the first core group set. AND them together to generate
- * a mask of cores in the first core group.
- */
- core_mask = ~(pfdev->features.l2_present - 1) &
- (pfdev->features.l2_present - 2);
- dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
- hweight64(core_mask),
- hweight64(pfdev->features.shader_present));
- }
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == (pfdev->features.l2_present & core_mask),
- 100, 20000);
+ 10, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu L2");
@@ -395,22 +412,40 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
pfdev->features.shader_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == (pfdev->features.shader_present & core_mask),
- 100, 20000);
+ 10, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
- val, val == pfdev->features.tiler_present, 100, 1000);
+ val, val == pfdev->features.tiler_present, 10, 1000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
- gpu_write(pfdev, TILER_PWROFF_LO, 0);
- gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, L2_PWROFF_LO, 0);
+ u64 core_mask = panfrost_get_core_mask(pfdev);
+ int ret;
+ u32 val;
+
+ gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present & core_mask);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
+ val, !val, 1, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "shader power transition timeout");
+
+ gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
+ val, !val, 1, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "tiler power transition timeout");
+
+ gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present & core_mask);
+ ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
+ val, !val, 0, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "l2 power transition timeout");
}
int panfrost_gpu_init(struct panfrost_device *pfdev)
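
The bit trick in panfrost_get_core_mask() is easier to follow with concrete numbers. The stand-alone C sketch below uses a hypothetical l2_present value of 0x11 (two core groups, L2 bits 0 and 4); the value is an assumption for illustration, not taken from real hardware.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical GPU with two core groups: L2 bits 0 and 4 present. */
	uint64_t l2_present = 0x11;

	/* l2_present - 1 = 0x10, so ~(l2_present - 1) leaves only the lowest
	 * L2 bit of l2_present set (plus unrelated higher bits). */
	uint64_t lowest_l2 = ~(l2_present - 1);

	/* l2_present - 2 = 0x0f: every bit below the second L2 bit. */
	uint64_t below_second_l2 = l2_present - 2;

	/* ANDing the two yields the cores of the first core group only. */
	uint64_t core_mask = lowest_l2 & below_second_l2;

	printf("core_mask = 0x%llx\n", (unsigned long long)core_mask); /* 0xf */
	return 0;
}

With a single core group (l2_present == 1) the same arithmetic would also evaluate to U64_MAX, but the function returns early so the "using only 1st core group" message is not printed for hardware that has nothing to mask.
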
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index ecd2e035147f..f9446e197428 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -852,7 +852,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
js->queue[j].fence_context = dma_fence_context_alloc(1);
ret = drm_sched_init(&js->queue[j].sched,
- &panfrost_sched_ops,
+ &panfrost_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
nentries, 0,
msecs_to_jiffies(JOB_TIMEOUT_MS),
@@ -963,7 +963,7 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&js->queue[i].sched.hw_rq_count))
+ if (atomic_read(&js->queue[i].sched.credit_count))
return false;
}
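
The two panfrost_job.c hunks track a rework of the common DRM scheduler interface rather than a panfrost-specific change: drm_sched_init() gained a submission-workqueue argument and per-job credit accounting replaced the old hw_submission/hw_rq_count bookkeeping. The annotated sketch below is my reading of the new arguments; the parameter descriptions are not part of the patch, and the arguments past the timeout are left unshown because the hunk does not include them.

/*
 * drm_sched_init(&js->queue[j].sched,
 *                &panfrost_sched_ops,           backend ops, unchanged
 *                NULL,                          submit_wq: NULL lets the scheduler
 *                                               allocate its own ordered workqueue
 *                DRM_SCHED_PRIORITY_COUNT,      number of priority run-queues
 *                nentries,                      credit limit, replacing the old
 *                                               hw_submission limit
 *                0,                             job hang limit, unchanged
 *                msecs_to_jiffies(JOB_TIMEOUT_MS),
 *                ...);                          remaining arguments unchanged
 *
 * Correspondingly, drm_sched_job_init() now takes a credit count (1 here,
 * since each panfrost job consumes one credit), and the idle check reads
 * sched.credit_count where it previously read sched.hw_rq_count.
 */
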
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 55ec807550b3..c25743b05c55 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -44,6 +44,7 @@
GPU_IRQ_MULTIPLE_FAULT)
#define GPU_CMD 0x30
#define GPU_CMD_SOFT_RESET 0x01
+#define GPU_CMD_HARD_RESET 0x02
#define GPU_CMD_PERFCNT_CLEAR 0x03
#define GPU_CMD_PERFCNT_SAMPLE 0x04
#define GPU_CMD_CYCLE_COUNT_START 0x05