author	Lionel Landwerlin <lionel.g.landwerlin@intel.com>	2020-05-08 19:54:48 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2020-05-08 20:57:56 +0100
commit	3136deb7ba223f7ce3117cdfcd918a6f48e2d221 (patch)
tree	a8bf07c5c75d53c427541f5deaa97faaa7e2e747 /drivers/gpu/drm/i915/i915_request.c
parent	e41627db6f364f9e3157b15a7646f8dd35f75508 (diff)
drm/i915: Peel dma-fence-chains for await
To allow faster engine-to-engine synchronization, peel the layer of dma-fence-chain to expose potential i915 fences so that the i915_request code can emit HW semaphore wait/signal operations in the ring, which is faster than waking up the host to submit unblocked workloads after interrupt notification. This is similar to the peeling we do for e.g. dma_fence_array.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200508185448.29709-1-chris@chris-wilson.co.uk
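For context, the peeling idiom the patch relies on can be sketched generically as below. This is a minimal illustration only, assuming the dma-fence-chain API from <linux/dma-fence-chain.h>; example_peel_chain() and its handle() callback are hypothetical names, not part of this patch. dma_fence_chain_for_each() hands back a referenced fence on each iteration, so a caller that breaks out early must drop the last reference, which is why the new i915_request_await_external() in the diff below ends with dma_fence_put(iter).

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/* Hypothetical sketch: visit every fence contained in a dma-fence-chain. */
static int example_peel_chain(struct dma_fence *fence,
			      int (*handle)(struct dma_fence *f))
{
	struct dma_fence *iter;
	int err = 0;

	dma_fence_chain_for_each(iter, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(iter);

		/* The bottom link of a chain may be a plain, unwrapped fence. */
		err = handle(chain ? chain->fence : iter);
		if (err)
			break;
	}
	dma_fence_put(iter); /* NULL (a no-op) if the walk ran to completion */

	return err;
}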
Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 94189c7d43cd..9df2cb8d66d0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
 #include <linux/irq_work.h>
 #include <linux/prefetch.h>
 #include <linux/sched.h>
@@ -1068,13 +1069,39 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 }
 
 static int
-i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
 {
 	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
 					     fence->context ? I915_FENCE_TIMEOUT : 0,
 					     I915_FENCE_GFP);
 }
 
+static int
+i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+	struct dma_fence *iter;
+	int err = 0;
+
+	if (!to_dma_fence_chain(fence))
+		return __i915_request_await_external(rq, fence);
+
+	dma_fence_chain_for_each(iter, fence) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+
+		if (!dma_fence_is_i915(chain->fence)) {
+			err = __i915_request_await_external(rq, iter);
+			break;
+		}
+
+		err = i915_request_await_dma_fence(rq, chain->fence);
+		if (err < 0)
+			break;
+	}
+
+	dma_fence_put(iter);
+	return err;
+}
+
 int
 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 {