Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c  40 ++++------------------------------------
1 file changed, 4 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 00b7c4eb3f32..526c1e9acbd5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -368,8 +368,6 @@ __await_execution(struct i915_request *rq,
 	}
 	spin_unlock_irq(&signal->lock);
 
-	/* Copy across semaphore status as we need the same behaviour */
-	rq->sched.flags |= signal->sched.flags;
 	return 0;
 }
 
@@ -537,10 +535,8 @@ void __i915_request_unsubmit(struct i915_request *request)
 	spin_unlock(&request->lock);
 
 	/* We've already spun, don't charge on resubmitting. */
-	if (request->sched.semaphores && i915_request_started(request)) {
-		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+	if (request->sched.semaphores && i915_request_started(request))
 		request->sched.semaphores = 0;
-	}
 
 	/*
 	 * We don't need to wake_up any waiters on request->execute, they
@@ -598,15 +594,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
-static void irq_semaphore_cb(struct irq_work *wrk)
-{
-	struct i915_request *rq =
-		container_of(wrk, typeof(*rq), semaphore_work);
-
-	i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
-	i915_request_put(rq);
-}
-
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
@@ -614,11 +601,6 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
 	switch (state) {
 	case FENCE_COMPLETE:
-		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
-			i915_request_get(rq);
-			init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
-			irq_work_queue(&rq->semaphore_work);
-		}
 		break;
 
 	case FENCE_FREE:
@@ -997,6 +979,7 @@ emit_semaphore_wait(struct i915_request *to,
 		    gfp_t gfp)
 {
 	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+	struct i915_sw_fence *wait = &to->submit;
 
 	if (!intel_context_use_semaphores(to->context))
 		goto await_fence;
@@ -1031,11 +1014,10 @@ emit_semaphore_wait(struct i915_request *to,
 		goto await_fence;
 
 	to->sched.semaphores |= mask;
-	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-	return 0;
+	wait = &to->semaphore;
 
 await_fence:
-	return i915_sw_fence_await_dma_fence(&to->submit,
+	return i915_sw_fence_await_dma_fence(wait,
 					     &from->fence, 0,
 					     I915_FENCE_GFP);
 }
@@ -1070,17 +1052,6 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 	if (ret < 0)
 		return ret;
 
-	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
-		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
-						    &from->fence, 0,
-						    I915_FENCE_GFP);
-		if (ret < 0)
-			return ret;
-	}
-
-	if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
-		to->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
-
 	return 0;
 }
 
@@ -1528,9 +1499,6 @@ void i915_request_add(struct i915_request *rq)
 		attr = ctx->sched;
 	rcu_read_unlock();
 
-	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
-		attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
 	__i915_request_queue(rq, &attr);
 
 	mutex_unlock(&tl->mutex);