Diffstat (limited to 'drivers/gpu/drm/imagination/pvr_power.c')
-rw-r--r--	drivers/gpu/drm/imagination/pvr_power.c | 318
1 file changed, 294 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index ba7816fd28ec..b9f801c63260 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -10,11 +10,17 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/pwrseq/consumer.h>
+#include <linux/reset.h>
 #include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -230,6 +236,118 @@ pvr_watchdog_init(struct pvr_device *pvr_dev)
 	return 0;
 }
 
+static int pvr_power_init_manual(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct reset_control *reset;
+
+	reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+	if (IS_ERR(reset))
+		return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+				     "failed to get gpu reset line\n");
+
+	pvr_dev->reset = reset;
+
+	return 0;
+}
+
+static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
+{
+	int err;
+
+	err = clk_prepare_enable(pvr_dev->core_clk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(pvr_dev->sys_clk);
+	if (err)
+		goto err_core_clk_disable;
+
+	err = clk_prepare_enable(pvr_dev->mem_clk);
+	if (err)
+		goto err_sys_clk_disable;
+
+	/*
+	 * According to the hardware manual, a delay of at least 32 clock
+	 * cycles is required between de-asserting the clkgen reset and
+	 * de-asserting the GPU reset. Assuming a worst-case scenario with
+	 * a very high GPU clock frequency, a delay of 1 microsecond is
+	 * sufficient to ensure this requirement is met across all
+	 * feasible GPU clock speeds.
+	 */
+	udelay(1);
+
+	err = reset_control_deassert(pvr_dev->reset);
+	if (err)
+		goto err_mem_clk_disable;
+
+	return 0;
+
+err_mem_clk_disable:
+	clk_disable_unprepare(pvr_dev->mem_clk);
+
+err_sys_clk_disable:
+	clk_disable_unprepare(pvr_dev->sys_clk);
+
+err_core_clk_disable:
+	clk_disable_unprepare(pvr_dev->core_clk);
+
+	return err;
+}
+
+static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
+{
+	int err;
+
+	err = reset_control_assert(pvr_dev->reset);
+
+	clk_disable_unprepare(pvr_dev->mem_clk);
+	clk_disable_unprepare(pvr_dev->sys_clk);
+	clk_disable_unprepare(pvr_dev->core_clk);
+
+	return err;
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
+	.init = pvr_power_init_manual,
+	.power_on = pvr_power_on_sequence_manual,
+	.power_off = pvr_power_off_sequence_manual,
+};
+
+static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
+{
+	struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+	pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
+	if (IS_ERR(pvr_dev->pwrseq)) {
+		/*
+		 * This platform requires a sequencer. If we can't get it, we
+		 * must return the error (including -EPROBE_DEFER to wait for
+		 * the provider to appear).
+		 */
+		return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
+				     "Failed to get required power sequencer\n");
+	}
+
+	return 0;
+}
+
+static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+	return pwrseq_power_on(pvr_dev->pwrseq);
+}
+
+static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+	return pwrseq_power_off(pvr_dev->pwrseq);
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
+	.init = pvr_power_init_pwrseq,
+	.power_on = pvr_power_on_sequence_pwrseq,
+	.power_off = pvr_power_off_sequence_pwrseq,
+};
+
 int
 pvr_power_device_suspend(struct device *dev)
 {
@@ -248,9 +366,7 @@ pvr_power_device_suspend(struct device *dev)
 		goto err_drm_dev_exit;
 	}
 
-	clk_disable_unprepare(pvr_dev->mem_clk);
-	clk_disable_unprepare(pvr_dev->sys_clk);
-	clk_disable_unprepare(pvr_dev->core_clk);
+	err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
 
 err_drm_dev_exit:
 	drm_dev_exit(idx);
@@ -270,36 +386,22 @@ pvr_power_device_resume(struct device *dev)
 	if (!drm_dev_enter(drm_dev, &idx))
 		return -EIO;
 
-	err = clk_prepare_enable(pvr_dev->core_clk);
+	err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
 	if (err)
 		goto err_drm_dev_exit;
 
-	err = clk_prepare_enable(pvr_dev->sys_clk);
-	if (err)
-		goto err_core_clk_disable;
-
-	err = clk_prepare_enable(pvr_dev->mem_clk);
-	if (err)
-		goto err_sys_clk_disable;
-
 	if (pvr_dev->fw_dev.booted) {
 		err = pvr_power_fw_enable(pvr_dev);
 		if (err)
-			goto err_mem_clk_disable;
+			goto err_power_off;
 	}
 
 	drm_dev_exit(idx);
 
 	return 0;
 
-err_mem_clk_disable:
-	clk_disable_unprepare(pvr_dev->mem_clk);
-
-err_sys_clk_disable:
-	clk_disable_unprepare(pvr_dev->sys_clk);
-
-err_core_clk_disable:
-	clk_disable_unprepare(pvr_dev->core_clk);
+err_power_off:
+	pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
 
 err_drm_dev_exit:
 	drm_dev_exit(idx);
@@ -317,6 +419,63 @@ pvr_power_device_idle(struct device *dev)
 	return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
 }
 
+static int
+pvr_power_clear_error(struct pvr_device *pvr_dev)
+{
+	struct device *dev = from_pvr_device(pvr_dev)->dev;
+	int err;
+
+	/* Ensure the device state is known and nothing is happening past this point */
+	pm_runtime_disable(dev);
+
+	/* Attempt to clear the runtime PM error by setting the current state again */
+	if (pm_runtime_status_suspended(dev))
+		err = pm_runtime_set_suspended(dev);
+	else
+		err = pm_runtime_set_active(dev);
+
+	if (err) {
+		drm_err(from_pvr_device(pvr_dev),
+			"%s: Failed to clear runtime PM error (new error %d)\n",
+			__func__, err);
+	}
+
+	pm_runtime_enable(dev);
+
+	return err;
+}
+
+/**
+ * pvr_power_get_clear() - Acquire a power reference, correcting any errors
+ * @pvr_dev: Device pointer
+ *
+ * Attempt to acquire a power reference on the device. If the runtime PM
+ * is in error state, attempt to clear the error and retry.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_power_get() or the runtime PM API.
+ */
+static int
+pvr_power_get_clear(struct pvr_device *pvr_dev)
+{
+	int err;
+
+	err = pvr_power_get(pvr_dev);
+	if (err == 0)
+		return err;
+
+	drm_warn(from_pvr_device(pvr_dev),
+		 "%s: pvr_power_get returned error %d, attempting recovery\n",
+		 __func__, err);
+
+	err = pvr_power_clear_error(pvr_dev);
+	if (err)
+		return err;
+
+	return pvr_power_get(pvr_dev);
+}
+
 /**
  * pvr_power_reset() - Reset the GPU
  * @pvr_dev: Device pointer
@@ -341,7 +500,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
 	 * Take a power reference during the reset. This should prevent any interference with the
 	 * power state during reset.
 	 */
-	WARN_ON(pvr_power_get(pvr_dev));
+	WARN_ON(pvr_power_get_clear(pvr_dev));
 
 	down_write(&pvr_dev->reset_sem);
 
@@ -363,13 +522,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
 	if (!err) {
 		if (hard_reset) {
 			pvr_dev->fw_dev.booted = false;
-			WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev));
+			WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev));
 
 			err = pvr_fw_hard_reset(pvr_dev);
 			if (err)
 				goto err_device_lost;
 
-			err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev);
+			err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev);
 			pvr_dev->fw_dev.booted = true;
 			if (err)
 				goto err_device_lost;
@@ -431,3 +590,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
 {
 	cancel_delayed_work_sync(&pvr_dev->watchdog.work);
 }
+
+int pvr_power_domains_init(struct pvr_device *pvr_dev)
+{
+	struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+	struct device_link **domain_links __free(kfree) = NULL;
+	struct device **domain_devs __free(kfree) = NULL;
+	int domain_count;
+	int link_count;
+
+	char dev_name[2] = "a";
+	int err;
+	int i;
+
+	domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
+						  "#power-domain-cells");
+	if (domain_count < 0)
+		return domain_count;
+
+	if (domain_count <= 1)
+		return 0;
+
+	link_count = domain_count + (domain_count - 1);
+
+	domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
+	if (!domain_devs)
+		return -ENOMEM;
+
+	domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
+	if (!domain_links)
+		return -ENOMEM;
+
+	for (i = 0; i < domain_count; i++) {
+		struct device *domain_dev;
+
+		dev_name[0] = 'a' + i;
+		domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
+		if (IS_ERR_OR_NULL(domain_dev)) {
+			err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
+			goto err_detach;
+		}
+
+		domain_devs[i] = domain_dev;
+	}
+
+	for (i = 0; i < domain_count; i++) {
+		struct device_link *link;
+
+		link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+		if (!link) {
+			err = -ENODEV;
+			goto err_unlink;
+		}
+
+		domain_links[i] = link;
+	}
+
+	for (i = domain_count; i < link_count; i++) {
+		struct device_link *link;
+
+		link = device_link_add(domain_devs[i - domain_count + 1],
+				       domain_devs[i - domain_count],
+				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+		if (!link) {
+			err = -ENODEV;
+			goto err_unlink;
+		}
+
+		domain_links[i] = link;
+	}
+
+	pvr_dev->power = (struct pvr_device_power){
+		.domain_devs = no_free_ptr(domain_devs),
+		.domain_links = no_free_ptr(domain_links),
+		.domain_count = domain_count,
+	};
+
+	return 0;
+
+err_unlink:
+	while (--i >= 0)
+		device_link_del(domain_links[i]);
+
+	i = domain_count;
+
+err_detach:
+	while (--i >= 0)
+		dev_pm_domain_detach(domain_devs[i], true);
+
+	return err;
+}
+
+void pvr_power_domains_fini(struct pvr_device *pvr_dev)
+{
+	const int domain_count = pvr_dev->power.domain_count;
+
+	int i = domain_count + (domain_count - 1);
+
+	while (--i >= 0)
+		device_link_del(pvr_dev->power.domain_links[i]);
+
+	i = domain_count;
+
+	while (--i >= 0)
+		dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
+
+	kfree(pvr_dev->power.domain_links);
+	kfree(pvr_dev->power.domain_devs);
+
+	pvr_dev->power = (struct pvr_device_power){ 0 };
+}
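Note: the suspend/resume hunks above route all power sequencing through pvr_dev->device_data->pwr_ops, but the declarations behind that pointer are not part of this file's diff. A minimal sketch of the interface, reconstructed from the two ops tables defined above; the struct layout and the name pvr_device_data are assumptions, with the real definitions presumably in the driver headers elsewhere in the series:

	struct pvr_device;

	/* Sketch only: reconstructed from usage in this diff, not taken
	 * from the driver headers. */
	struct pvr_power_sequence_ops {
		int (*init)(struct pvr_device *pvr_dev);      /* probe-time setup */
		int (*power_on)(struct pvr_device *pvr_dev);  /* power-up sequence */
		int (*power_off)(struct pvr_device *pvr_dev); /* power-down sequence */
	};

	/* Hypothetical per-SoC match data selecting one of the two ops tables. */
	struct pvr_device_data {
		const struct pvr_power_sequence_ops *pwr_ops;
	};

Platforms with discrete clock and reset controls would point pwr_ops at pvr_power_sequence_ops_manual, while platforms whose power-up procedure is owned by a power sequencer provider would use pvr_power_sequence_ops_pwrseq.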
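On the 32-cycle requirement in pvr_power_on_sequence_manual(): at a 32 MHz clock, 32 cycles take exactly 32 / 32 MHz = 1 us, and any faster clock completes 32 cycles in less time, so the fixed udelay(1) covers every core clock rate of 32 MHz and above.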
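pvr_power_clear_error() relies on a property of the runtime PM core: when a suspend or resume callback fails, the core latches the error in the device's runtime_error field and fails further transitions until the status is explicitly re-set, which pm_runtime_set_suspended() and pm_runtime_set_active() do. A generic sketch of the same recovery pattern, using a hypothetical helper name:

	#include <linux/pm_runtime.h>

	/* Hypothetical helper, not part of the patch: retry a failed
	 * runtime-PM get after clearing a latched error, mirroring
	 * pvr_power_get_clear() above. */
	static int example_get_with_recovery(struct device *dev)
	{
		int err = pm_runtime_resume_and_get(dev);

		if (err < 0) {
			pm_runtime_disable(dev);
			/* Re-asserting the current status clears ->runtime_error. */
			if (pm_runtime_status_suspended(dev))
				err = pm_runtime_set_suspended(dev);
			else
				err = pm_runtime_set_active(dev);
			pm_runtime_enable(dev);

			if (!err)
				err = pm_runtime_resume_and_get(dev);
		}

		return err;
	}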
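In pvr_power_domains_init(), dev_pm_domain_attach_by_name() resolves each generated name through the GPU node's power-domain-names property, so a device with more than one domain is expected to name them "a", "b", "c", ... in order. The link_count arithmetic then accounts for one consumer link per domain plus a chain between consecutive domains that fixes their power-up order; an illustration for three domains:

	/*
	 * device_link graph for domain_count == 3 (illustration only;
	 * arrows point from consumer to supplier):
	 *
	 *   gpu --> domain "a"          \
	 *   gpu --> domain "b"           } domain_count consumer links
	 *   gpu --> domain "c"          /
	 *   domain "b" --> domain "a"   \  domain_count - 1 chain links,
	 *   domain "c" --> domain "b"   /  so "a" powers up first
	 *
	 * Total: domain_count + (domain_count - 1) == 5 links.
	 */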
