author | Chris Wilson <chris@chris-wilson.co.uk> | 2015-04-07 16:20:32 +0100
---|---|---
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-04-10 08:56:02 +0200
commit | 1854d5ca0dd7a9fc11243ff220a3e93fce2b4d3e (patch) |
tree | 6a83912410f343ae5abd1a03aa37a454b9ca2a24 /drivers/gpu/drm/i915/intel_pm.c |
parent | 6ad790c0f5ac55fd13f322c23519f0d6f0721864 (diff) |
drm/i915: Diminish contribution of wait-boosting from clients
With boosting for missed pageflips, we have a much stronger indication
of when we need to (temporarily) boost the GPU frequency to ensure smooth
delivery of frames. So now only allow each client to perform one RPS boost
per period of GPU activity when it stalls waiting for results.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Deepak S <deepak.s@linux.intel.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
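The mechanism the patch uses for this once-per-activity-period throttle is list membership: a client that has already boosted sits on dev_priv->rps.clients (via its rps_boost node) and is skipped until the GPU goes idle, at which point gen6_rps_idle() empties the list and every client becomes eligible again. Below is a minimal, self-contained userspace sketch of that pattern; the names (struct client, boost(), idle(), and the tiny list helpers standing in for <linux/list.h>) are illustrative only and not taken from the driver.

```c
#include <stdbool.h>
#include <stdio.h>

/* Minimal intrusive doubly-linked list, standing in for <linux/list.h>. */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* Being linked on gpu->boosted_clients means "already boosted this period". */
struct client {
	const char *name;
	struct list_node boost_link;
	unsigned int boosts;
};

struct gpu {
	struct list_node boosted_clients;
	unsigned int anonymous_boosts;
};

/* Boost on behalf of a client, or anonymously when client == NULL. */
static void boost(struct gpu *gpu, struct client *client)
{
	if (client && !list_empty(&client->boost_link)) {
		printf("%s: boost suppressed (already boosted this period)\n",
		       client->name);
		return;
	}

	/* ...this is where the frequency would actually be raised... */

	if (client) {
		list_add(&client->boost_link, &gpu->boosted_clients);
		client->boosts++;
		printf("%s: boosted\n", client->name);
	} else {
		gpu->anonymous_boosts++;
		printf("anonymous boost\n");
	}
}

/* Going idle ends the activity period: all clients may boost again. */
static void idle(struct gpu *gpu)
{
	while (!list_empty(&gpu->boosted_clients))
		list_del_init(gpu->boosted_clients.next);
}

int main(void)
{
	struct gpu gpu = { .anonymous_boosts = 0 };
	struct client a = { .name = "client-a", .boosts = 0 };

	list_init(&gpu.boosted_clients);
	list_init(&a.boost_link);

	boost(&gpu, &a);	/* first stall in this period: boosts */
	boost(&gpu, &a);	/* second stall: suppressed */
	idle(&gpu);		/* activity period ends */
	boost(&gpu, &a);	/* eligible again */
	boost(&gpu, NULL);	/* anonymous boost, counted separately */
	return 0;
}
```

Using list membership as the "already boosted" flag, rather than a per-client boolean, presumably lets the idle path reset every client with a single list walk instead of having to visit each open DRM file.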
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 18
1 file changed, 15 insertions, 3 deletions
```diff
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index acf1a318fda9..1ab9e897994a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4091,10 +4091,14 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 		dev_priv->rps.last_adj = 0;
 		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 	}
+
+	while (!list_empty(&dev_priv->rps.clients))
+		list_del_init(dev_priv->rps.clients.next);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-void gen6_rps_boost(struct drm_i915_private *dev_priv)
+void gen6_rps_boost(struct drm_i915_private *dev_priv,
+		    struct drm_i915_file_private *file_priv)
 {
 	u32 val;
 
@@ -4102,9 +4106,16 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	val = dev_priv->rps.max_freq_softlimit;
 	if (dev_priv->rps.enabled &&
 	    dev_priv->mm.busy &&
-	    dev_priv->rps.cur_freq < val) {
+	    dev_priv->rps.cur_freq < val &&
+	    (file_priv == NULL || list_empty(&file_priv->rps_boost))) {
 		intel_set_rps(dev_priv->dev, val);
 		dev_priv->rps.last_adj = 0;
+
+		if (file_priv != NULL) {
+			list_add(&file_priv->rps_boost, &dev_priv->rps.clients);
+			file_priv->rps_boosts++;
+		} else
+			dev_priv->rps.boosts++;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
@@ -6782,7 +6793,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct request_boost *boost = container_of(work, struct request_boost, work);
 
 	if (!i915_gem_request_completed(boost->rq, true))
-		gen6_rps_boost(to_i915(boost->rq->ring->dev));
+		gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
 
 	i915_gem_request_unreference__unlocked(boost->rq);
 	kfree(boost);
@@ -6815,6 +6826,7 @@ void intel_pm_setup(struct drm_device *dev)
 
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
+	INIT_LIST_HEAD(&dev_priv->rps.clients);
 
 	dev_priv->pm.suspended = false;
 }
```
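Note how the two kinds of callers are distinguished after this change: the boost worker __intel_rps_boost_work() passes NULL, so its boosts bypass the per-client throttle and are only counted in dev_priv->rps.boosts, whereas a wait path that can attribute the stall to a specific drm_i915_file_private passes that pointer and is limited to one boost per activity period. Callers passing a non-NULL file_priv presumably live outside intel_pm.c, which is why none appear in this diffstat.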