Diffstat (limited to 'drivers/gpu/drm/i915/display')
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c               |  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c                    |  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c      |  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c                  |  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c                 |  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c                  | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c               | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c              |  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c                   |  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c                   |  8
13 files changed, 50 insertions, 29 deletions
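
Editor's note: the common pattern across every hunk below is replacing schedule_work()/schedule_delayed_work()/mod_delayed_work(system_wq, ...) with queue_work()/queue_delayed_work()/mod_delayed_work() on the driver-private i915->unordered_wq, and replacing flush_scheduled_work() with a flush of that private queue. A minimal sketch of the pattern, assuming a driver-owned workqueue allocated at init time (the "my_driver" names are illustrative, not taken from the patch):

/* Illustrative sketch only -- not part of the patch below. */
#include <linux/workqueue.h>

struct my_driver {
	struct workqueue_struct *unordered_wq;	/* driver-private, unordered */
	struct work_struct hotplug_work;
};

static int my_driver_init_wq(struct my_driver *drv)
{
	/* flags = 0, max_active = 0: a normal unordered workqueue with defaults */
	drv->unordered_wq = alloc_workqueue("my-unordered", 0, 0);
	if (!drv->unordered_wq)
		return -ENOMEM;
	return 0;
}

static void my_driver_hotplug_irq(struct my_driver *drv)
{
	/* Before: schedule_work(&drv->hotplug_work);  -- queues on the global system_wq */
	queue_work(drv->unordered_wq, &drv->hotplug_work);
}

static void my_driver_remove(struct my_driver *drv)
{
	/* Before: flush_scheduled_work();  -- flushes the global system_wq */
	flush_workqueue(drv->unordered_wq);
	destroy_workqueue(drv->unordered_wq);
}

Using a driver-owned queue means teardown only has to flush the driver's own work items rather than the shared system workqueue, which also carries unrelated work queued by other users.
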
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 5c7fdc82ac22..d8533603ad05 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7180,11 +7180,12 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
break;
case FENCE_FREE:
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_atomic_helper *helper =
- &to_i915(state->base.dev)->display.atomic_helper;
+ &i915->display.atomic_helper;
if (llist_add(&state->freed, &helper->free_list))
- schedule_work(&helper->free_work);
+ queue_work(i915->unordered_wq, &helper->free_work);
break;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index dc8de861339d..b909814ae02b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -442,7 +442,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
- flush_scheduled_work();
+ flush_workqueue(i915->unordered_wq);
intel_hdcp_component_fini(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 8a88de67ff0a..5f479f3828bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1057,7 +1057,7 @@ void intel_dmc_init(struct drm_i915_private *i915)
i915->display.dmc.dmc = dmc;
drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dmc->work);
+ queue_work(i915->unordered_wq, &dmc->work);
return;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f4192fda1a76..09dc6c88ad28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5251,7 +5251,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
spin_lock_irq(&i915->irq_lock);
i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
spin_unlock_irq(&i915->irq_lock);
- queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
+ queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 176b610642e7..a263773f4d68 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1064,6 +1064,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1081,7 +1082,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
}
/* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
+ queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
}
/* Perform the link training on all LTTPRs and the DPRX on a link. */
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 760e63cdc0c8..0d35b6be5b6a 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -111,7 +111,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
- mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 29aa029d249d..7f8b2d7713c7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1600,7 +1600,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- schedule_work(&fbc->underrun_work);
+ queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 2c3f7befed17..4d6209c9268b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -694,7 +694,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
- schedule_work(&dev_priv->display.fbdev.suspend_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.fbdev.suspend_work);
return;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 17542c28dfd5..5ed450111f77 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -983,6 +983,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
struct drm_device *dev = connector->base.dev;
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
@@ -1001,7 +1002,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- schedule_work(&hdcp->prop_work);
+ queue_work(i915->unordered_wq, &hdcp->prop_work);
}
}
@@ -2090,16 +2091,17 @@ static void intel_hdcp_check_work(struct work_struct *work)
struct intel_hdcp,
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (drm_connector_is_unregistered(&connector->base))
return;
if (!intel_hdcp2_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP2_CHECK_PERIOD_MS);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
}
static int i915_hdcp_component_bind(struct device *i915_kdev,
@@ -2398,7 +2400,8 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
}
if (!ret) {
- schedule_delayed_work(&hdcp->check_work, check_link_interval);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ check_link_interval);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
true);
@@ -2447,6 +2450,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool content_protection_type_changed, desired_and_not_enabled = false;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (!connector->hdcp.shim)
return;
@@ -2473,7 +2477,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- schedule_work(&hdcp->prop_work);
+ queue_work(i915->unordered_wq, &hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
@@ -2490,7 +2494,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- schedule_work(&hdcp->prop_work);
+ queue_work(i915->unordered_wq, &hdcp->prop_work);
}
}
@@ -2602,6 +2606,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (!hdcp->shim)
return;
@@ -2609,5 +2614,5 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
atomic_inc(&connector->hdcp.cp_irq_count);
wake_up_all(&connector->hdcp.cp_irq_queue);
- schedule_delayed_work(&hdcp->check_work, 0);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 23a5e1a875f1..1160fa20433b 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -212,7 +212,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(&dev_priv->drm);
- mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
+ mod_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -339,7 +340,8 @@ static void i915_digport_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -446,7 +448,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
dev_priv->display.hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
}
}
@@ -577,7 +580,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.hotplug_work, 0);
}
/**
@@ -687,7 +691,8 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- schedule_work(&dev_priv->display.hotplug.poll_init_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.poll_init_work);
}
/**
@@ -715,7 +720,8 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
- schedule_work(&dev_priv->display.hotplug.poll_init_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.poll_init_work);
}
void intel_hpd_init_early(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index b7973a05d022..84078fb82b2f 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -635,7 +635,8 @@ static void asle_work(struct work_struct *work)
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
if (dev_priv->display.opregion.asle)
- schedule_work(&dev_priv->display.opregion.asle_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 5e7ba594e7e7..73f0f1714b37 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -867,6 +867,7 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
unsigned long delay;
/*
@@ -882,7 +883,8 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
* operations.
*/
delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
- schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+ queue_delayed_work(i915->unordered_wq,
+ &intel_dp->pps.panel_vdd_work, delay);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index ea0389c5f656..d58ed9b62e67 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -341,7 +341,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
*/
intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
- schedule_work(&intel_dp->psr.work);
+ queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
}
}
@@ -2440,6 +2440,8 @@ static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
!intel_dp->psr.active)
return;
@@ -2453,7 +2455,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
return;
tgl_psr2_enable_dc3co(intel_dp);
- mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+ mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
}
@@ -2493,7 +2495,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
psr_force_hw_tracking_exit(intel_dp);
if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
+ queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
}
}