author		Chris Wilson <chris@chris-wilson.co.uk>	2019-12-23 21:10:08 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-12-24 09:51:32 +0000
commit		30084b143de7a320ab8fc609a8ad3a396232d002
tree		ea81708354097514b0b96f3d504aaa7ad7dcb1ff
parent		b42d3b159ad8073602e466244f49f29f327e6798
drm/i915/gt: Flush other retirees inside intel_gt_retire_requests()
Our goal in wait_for_idle (intel_gt_retire_requests) is to wait for the
current workload *and* its idle barriers. This requires us to notice the
late arrival of those barriers, which is done by inspecting the list of
active timelines. However, if a concurrent retirer is running, the new
timeline may not be added to the list until after we drop the lock -- so
flush concurrent retirers before we take the lock and inspect the list.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/878
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191223211008.2371613-1-chris@chris-wilson.co.uk
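To make the race concrete, the following is a minimal user-space sketch of
the pattern being applied (flush any concurrent retirer before taking the
lock and inspecting the shared list), not the i915 code itself. The names
retirer_work, flush_retirers and barrier_on_list are hypothetical stand-ins,
and pthread_join() plays the role that flush_work() plays in the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool barrier_on_list;	/* stands in for a late idle barrier */
static pthread_t retirer;

/* A concurrent retirer publishes a new timeline onto the active list. */
static void *retirer_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&list_lock);
	barrier_on_list = true;
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

/* Analogue of flush_submission(): wait for concurrent retirers to finish. */
static void flush_retirers(void)
{
	pthread_join(retirer, NULL);
}

int main(void)
{
	pthread_create(&retirer, NULL, retirer_work, NULL);

	/*
	 * Without this flush, the lock below could be taken before the
	 * retirer runs, and the late entry would be missed entirely.
	 */
	flush_retirers();

	pthread_mutex_lock(&list_lock);
	printf("idle barrier visible: %s\n", barrier_on_list ? "yes" : "no");
	pthread_mutex_unlock(&list_lock);
	return 0;
}

Built with "cc -pthread", this always reports the barrier as visible; drop
the flush_retirers() call and the inspecting thread can win the race and
miss it, which is the failure mode wait_for_idle hit before this patch.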
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gt_requests.c')
 drivers/gpu/drm/i915/gt/intel_gt_requests.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 9e75fa1b6bc1..fc691c130ba6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -26,21 +26,18 @@ static bool retire_requests(struct intel_timeline *tl)
 	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static bool flush_submission(struct intel_gt *gt)
+static void flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	bool active = false;
 
 	if (!intel_gt_pm_is_awake(gt))
-		return false;
+		return;
 
 	for_each_engine(engine, gt, id) {
-		active |= intel_engine_flush_submission(engine);
-		active |= flush_work(&engine->retire_work);
+		intel_engine_flush_submission(engine);
+		flush_work(&engine->retire_work);
 	}
-
-	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -126,7 +123,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		timeout = -timeout, interruptible = false;
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
@@ -153,6 +149,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		active_count += !retire_requests(tl);
 
+		flush_submission(gt); /* sync with concurrent retirees */
 		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
@@ -173,9 +170,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	if (flush_submission(gt))
-		active_count++;
-
 	return active_count ? timeout : 0;
 }