Diffstat (limited to 'drivers/gpu/drm/panthor/panthor_device.c')
-rw-r--r--  drivers/gpu/drm/panthor/panthor_device.c | 165
1 file changed, 108 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 6fbff516c1c1..e133b1e0ad6d 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -13,15 +13,36 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
+#include "panthor_hw.h"
#include "panthor_mmu.h"
+#include "panthor_pwr.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
+static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
+{
+ ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+
+ if (!ptdev->coherent)
+ return 0;
+
+ /* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
+ * ACE protocol has never been supported for command stream frontend GPUs.
+ */
+ if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
+ GPU_COHERENCY_PROT_BIT(ACE_LITE)))
+ return 0;
+
+ drm_err(&ptdev->base, "Coherency not supported by the device");
+ return -ENOTSUPP;
+}
+
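Note: coherency is only left enabled when both the firmware description (device_get_dma_attr()) and the GPU's own feature register agree. A minimal sketch of that double-check pattern, where MY_COHERENCY_REG and MY_ACE_LITE_BIT are hypothetical stand-ins for the Panthor register definitions:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/property.h>

#define MY_COHERENCY_REG	0x300	/* hypothetical: GPU feature register offset */
#define MY_ACE_LITE_BIT		BIT(1)	/* hypothetical: ACE-Lite support bit */

static bool my_dev_is_coherent(struct device *dev, void __iomem *regs)
{
	/* Firmware (DT/ACPI) must describe the bus as DMA-coherent... */
	if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
		return false;

	/* ...and the GPU itself must advertise the coherency protocol. */
	return readl(regs + MY_COHERENCY_REG) & MY_ACE_LITE_BIT;
}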
static int panthor_clk_init(struct panthor_device *ptdev)
{
ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
@@ -46,6 +67,16 @@ static int panthor_clk_init(struct panthor_device *ptdev)
return 0;
}
+static int panthor_init_power(struct device *dev)
+{
+ struct dev_pm_domain_list *pd_list = NULL;
+
+ if (dev->pm_domain)
+ return 0;
+
+ return devm_pm_domain_attach_list(dev, NULL, &pd_list);
+}
+
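Note: passing a NULL attach-data to devm_pm_domain_attach_list() attaches every power domain the firmware lists for the device, and the devres-managed list is released automatically on driver unbind. When the device has exactly one domain, the core has already attached it before probe, which is what the dev->pm_domain check above short-circuits. A usage sketch under those assumptions (my_probe is a hypothetical driver callback):

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int my_probe(struct platform_device *pdev)
{
	struct dev_pm_domain_list *pd_list = NULL;
	int ret;

	if (!pdev->dev.pm_domain) {
		/* Returns the number of attached domains, or -errno,
		 * hence the '< 0' check rather than 'if (ret)'.
		 */
		ret = devm_pm_domain_attach_list(&pdev->dev, NULL, &pd_list);
		if (ret < 0)
			return ret;
	}
	return 0;
}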
void panthor_device_unplug(struct panthor_device *ptdev)
{
/* This function can be called from two different paths: the reset work
@@ -64,6 +95,8 @@ void panthor_device_unplug(struct panthor_device *ptdev)
return;
}
+ drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);
+
/* Call drm_dev_unplug() so any access to HW blocks happening after
* that point get rejected.
*/
@@ -74,8 +107,6 @@ void panthor_device_unplug(struct panthor_device *ptdev)
*/
mutex_unlock(&ptdev->unplug.lock);
- drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);
-
/* Now, try to cleanly shutdown the GPU before the device resources
* get reclaimed.
*/
@@ -83,6 +114,7 @@ void panthor_device_unplug(struct panthor_device *ptdev)
panthor_fw_unplug(ptdev);
panthor_mmu_unplug(ptdev);
panthor_gpu_unplug(ptdev);
+ panthor_pwr_unplug(ptdev);
pm_runtime_dont_use_autosuspend(ptdev->base.dev);
pm_runtime_put_sync_suspend(ptdev->base.dev);
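Note: the pm_runtime_get_sync() call moves above drm_dev_unplug() so the teardown that follows always runs on a powered device. The resulting ordering, condensed into a sketch (struct my_device and my_unplug_hw_blocks() are hypothetical):

#include <drm/drm_drv.h>
#include <linux/pm_runtime.h>

struct my_device {			/* hypothetical container */
	struct drm_device ddev;
	struct device *dev;
};

static void my_unplug_hw_blocks(struct my_device *mdev);

static void my_unplug(struct my_device *mdev)
{
	pm_runtime_get_sync(mdev->dev);		/* power up before teardown */
	drm_dev_unplug(&mdev->ddev);		/* reject new HW access from here on */
	my_unplug_hw_blocks(mdev);		/* sched/fw/mmu/gpu/pwr teardown */
	pm_runtime_dont_use_autosuspend(mdev->dev);
	pm_runtime_put_sync_suspend(mdev->dev);	/* drop the ref, power down */
}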
@@ -101,7 +133,7 @@ static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
- cancel_work_sync(&ptdev->reset.work);
+ disable_work_sync(&ptdev->reset.work);
destroy_workqueue(ptdev->reset.wq);
}
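Note: unlike cancel_work_sync(), disable_work_sync() also leaves the work item disabled, so a queue_work() racing with this cleanup becomes a no-op instead of re-arming the work just before destroy_workqueue(). The teardown idiom, in short (drv->reset_work and drv->reset_wq stand in for a driver-owned work item and workqueue):

disable_work_sync(&drv->reset_work);	/* cancel, wait, and block requeueing */
destroy_workqueue(drv->reset_wq);	/* safe: the work can no longer run */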
@@ -110,14 +142,11 @@ static void panthor_device_reset_work(struct work_struct *work)
struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
int ret = 0, cookie;
- if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
- /*
- * No need for a reset as the device has been (or will be)
- * powered down
- */
- atomic_set(&ptdev->reset.pending, 0);
+ /* If the device is entering suspend, we don't reset. A slow reset will
+ * be forced at resume time instead.
+ */
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return;
- }
if (!drm_dev_enter(&ptdev->base, &cookie))
return;
@@ -125,8 +154,8 @@ static void panthor_device_reset_work(struct work_struct *work)
panthor_sched_pre_reset(ptdev);
panthor_fw_pre_reset(ptdev, true);
panthor_mmu_pre_reset(ptdev);
- panthor_gpu_soft_reset(ptdev);
- panthor_gpu_l2_power_on(ptdev);
+ panthor_hw_soft_reset(ptdev);
+ panthor_hw_l2_power_on(ptdev);
panthor_mmu_post_reset(ptdev);
ret = panthor_fw_post_reset(ptdev);
atomic_set(&ptdev->reset.pending, 0);
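Note: the drm_dev_enter()/drm_dev_exit() pair used by this worker is what makes it safe against a concurrent unplug; once drm_dev_unplug() has run, drm_dev_enter() fails and the worker backs off. The guard pattern in isolation:

int cookie;

if (!drm_dev_enter(ddev, &cookie))
	return;		/* drm_dev_unplug() already ran; the HW is gone */

/* ... hardware access is safe until drm_dev_exit() ... */
drm_dev_exit(cookie);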
@@ -156,7 +185,7 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+ ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
@@ -167,6 +196,11 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
+ INIT_LIST_HEAD(&ptdev->gems.node);
+#endif
+
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -200,6 +234,12 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+ ret = panthor_init_power(ptdev->base.dev);
+ if (ret < 0) {
+ drm_err(&ptdev->base, "init power domains failed, ret=%d", ret);
+ return ret;
+ }
+
ret = panthor_devfreq_init(ptdev);
if (ret)
return ret;
@@ -226,10 +266,22 @@ int panthor_device_init(struct panthor_device *ptdev)
return ret;
}
- ret = panthor_gpu_init(ptdev);
+ ret = panthor_hw_init(ptdev);
if (ret)
goto err_rpm_put;
+ ret = panthor_pwr_init(ptdev);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = panthor_gpu_init(ptdev);
+ if (ret)
+ goto err_unplug_pwr;
+
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
ret = panthor_mmu_init(ptdev);
if (ret)
goto err_unplug_gpu;
@@ -266,6 +318,9 @@ err_unplug_mmu:
err_unplug_gpu:
panthor_gpu_unplug(ptdev);
+err_unplug_pwr:
+ panthor_pwr_unplug(ptdev);
+
err_rpm_put:
pm_runtime_put_sync_suspend(ptdev->base.dev);
return ret;
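Note: each newly added init step gets an unwind label directly below the label of the step preceding it, so failures tear down in exact reverse order of construction. The ladder, condensed with hypothetical a/b blocks:

ret = a_init(dev);
if (ret)
	goto err_rpm_put;

ret = b_init(dev);		/* b depends on a */
if (ret)
	goto err_unplug_a;

/* ... later steps jump to err_unplug_b, and so on ... */

err_unplug_a:
	a_unplug(dev);
err_rpm_put:
	pm_runtime_put_sync_suspend(dev);
	return ret;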
@@ -415,6 +470,24 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
return 0;
}
+static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
+{
+ int ret;
+
+ panthor_pwr_resume(ptdev);
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+
+ ret = panthor_fw_resume(ptdev);
+ if (!ret)
+ return 0;
+
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ panthor_pwr_suspend(ptdev);
+ return ret;
+}
+
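Note: this helper gives resume an all-or-nothing building block: components come up in dependency order (pwr, gpu, mmu, then fw), and since only the firmware step can fail, a failure suspends the earlier components again in reverse order. That makes the helper safe to call twice, which the fast-then-slow reset fallback below relies on; condensed from that hunk:

ret = panthor_device_resume_hw_components(ptdev);
if (ret && ptdev->reset.fast) {
	ptdev->reset.fast = false;	/* degrade to a full (slow) reset */
	ret = panthor_device_resume_hw_components(ptdev);
}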
int panthor_device_resume(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
@@ -437,31 +510,34 @@ int panthor_device_resume(struct device *dev)
if (ret)
goto err_disable_stacks_clk;
- ret = panthor_devfreq_resume(ptdev);
- if (ret)
- goto err_disable_coregroup_clk;
+ panthor_devfreq_resume(ptdev);
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- if (!ret) {
- panthor_sched_resume(ptdev);
- } else {
- panthor_mmu_suspend(ptdev);
- panthor_gpu_suspend(ptdev);
+ /* If there was a reset pending at the time we suspended the
+ * device, we force a slow reset.
+ */
+ if (atomic_read(&ptdev->reset.pending)) {
+ ptdev->reset.fast = false;
+ atomic_set(&ptdev->reset.pending, 0);
}
+ ret = panthor_device_resume_hw_components(ptdev);
+ if (ret && ptdev->reset.fast) {
+ drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
+ ptdev->reset.fast = false;
+ ret = panthor_device_resume_hw_components(ptdev);
+ }
+
+ if (!ret)
+ panthor_sched_resume(ptdev);
+
drm_dev_exit(cookie);
if (ret)
goto err_suspend_devfreq;
}
- if (atomic_read(&ptdev->reset.pending))
- queue_work(ptdev->reset.wq, &ptdev->reset.work);
-
/* Clear all IOMEM mappings pointing to this device after we've
* resumed. This way the fake mappings pointing to the dummy pages
* are removed and the real iomem mapping will be restored on next
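Note: while suspended, userspace MMIO mappings are backed by a dummy page; the comment above refers to tearing those fake mappings down so the next fault maps the real iomem again. The lock-and-unmap sequence the driver uses for this (visible in full in the removed err_set_active block further down):

mutex_lock(&ptdev->pm.mmio_lock);
unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
		    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
mutex_unlock(&ptdev->pm.mmio_lock);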
@@ -476,8 +552,6 @@ int panthor_device_resume(struct device *dev)
err_suspend_devfreq:
panthor_devfreq_suspend(ptdev);
-
-err_disable_coregroup_clk:
clk_disable_unprepare(ptdev->clks.coregroup);
err_disable_stacks_clk:
@@ -488,13 +562,14 @@ err_disable_core_clk:
err_set_suspended:
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ atomic_set(&ptdev->pm.recovery_needed, 1);
return ret;
}
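Note: pm.recovery_needed is new state recorded when a resume attempt fails after the device has been marked suspended. How the flag is consumed is outside this diff; a purely hypothetical consumer sketch:

/* Hypothetical: test-and-clear the flag before trusting HW state. */
if (atomic_cmpxchg(&ptdev->pm.recovery_needed, 1, 0) == 1)
	queue_work(ptdev->reset.wq, &ptdev->reset.work);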
int panthor_device_suspend(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
- int ret, cookie;
+ int cookie;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return -EINVAL;
@@ -523,39 +598,15 @@ int panthor_device_suspend(struct device *dev)
panthor_fw_suspend(ptdev);
panthor_mmu_suspend(ptdev);
panthor_gpu_suspend(ptdev);
+ panthor_pwr_suspend(ptdev);
drm_dev_exit(cookie);
}
- ret = panthor_devfreq_suspend(ptdev);
- if (ret) {
- if (panthor_device_is_initialized(ptdev) &&
- drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- panthor_sched_resume(ptdev);
- drm_dev_exit(cookie);
- }
-
- goto err_set_active;
- }
+ panthor_devfreq_suspend(ptdev);
clk_disable_unprepare(ptdev->clks.coregroup);
clk_disable_unprepare(ptdev->clks.stacks);
clk_disable_unprepare(ptdev->clks.core);
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
return 0;
-
-err_set_active:
- /* If something failed and we have to revert back to an
- * active state, we also need to clear the MMIO userspace
- * mappings, so any dumb pages that were mapped while we
- * were trying to suspend gets invalidated.
- */
- mutex_lock(&ptdev->pm.mmio_lock);
- atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
- unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
- DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
- mutex_unlock(&ptdev->pm.mmio_lock);
- return ret;
}